author	Wey-Yi Guy <wey-yi.w.guy@intel.com>	2011-02-21 14:27:26 -0500
committer	Wey-Yi Guy <wey-yi.w.guy@intel.com>	2011-02-21 14:27:26 -0500
commit	be663ab67077fac8e23eb8e231a8c1c94cb32e54 (patch)
tree	c1d80a72f86be20135d3e57178e99b9d855f622f /drivers/net
parent	4bc85c1324aaa4a8bb0171e332ff762b6230bdfe (diff)
iwlwifi: split the drivers for agn and legacy devices 3945/4965
Intel WiFi devices 3945 and 4965 now have their own driver in the folder drivers/net/wireless/iwlegacy. Add support to build these drivers independently of the driver for AGN devices. Selecting the 3945 builds iwl3945.ko and iwl-legacy.ko, and selecting the 4965 builds iwl4965.ko and iwl-legacy.ko; iwl-legacy.ko contains the code shared between both devices. The 3945 is an ABG/BG device with no support for 802.11n. The 4965 is a 2x3 ABGN device.

Signed-off-by: Meenakshi Venkataraman <meenakshi.venkataraman@intel.com>
Acked-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/Makefile | 3
-rw-r--r--  drivers/net/wireless/iwlegacy/Kconfig | 116
-rw-r--r--  drivers/net/wireless/iwlegacy/Makefile | 25
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c) | 11
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h) | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-fh.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-fh.h) | 5
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-hw.h) | 9
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-led.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.c) | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-led.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.h) | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-rs.c) | 39
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945.c) | 319
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945.h) | 12
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-calib.c | 967
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-legacy.h) | 30
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c | 774
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h | 59
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c | 154
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-hw.h) | 26
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-led.c | 74
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-led.h | 33
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-lib.c | 1260
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-rs.c | 2870
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-rx.c | 291
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-sta.c | 720
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-tx.c | 1359
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-ucode.c | 166
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965.c (renamed from drivers/net/wireless/iwlwifi/iwl-4965.c) | 806
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965.h | 282
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-commands.h | 3405
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-core.c | 2668
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-core.h | 646
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-csr.h | 422
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-debug.h | 198
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-debugfs.c | 1467
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-dev.h | 1426
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-devtrace.c | 45
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-devtrace.h | 270
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-eeprom.c | 561
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-eeprom.h | 344
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-fh.h | 513
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-hcmd.c | 271
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-helpers.h | 181
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-io.h | 545
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-led.c | 188
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-led.h | 56
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-legacy-rs.h | 456
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-power.c | 165
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-power.h | 55
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-prph.h | 523
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-rx.c | 302
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-scan.c | 625
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-spectrum.h | 92
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-sta.c | 816
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-sta.h | 148
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-tx.c | 637
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl3945-base.c (renamed from drivers/net/wireless/iwlwifi/iwl3945-base.c) | 536
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl4965-base.c | 3633
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 124
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile | 39
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 54
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-legacy.c | 657
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 7
73 files changed, 30482 insertions, 2083 deletions
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index b4338f389394..7aeb113cbb90 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -274,6 +274,7 @@ source "drivers/net/wireless/b43legacy/Kconfig"
 source "drivers/net/wireless/hostap/Kconfig"
 source "drivers/net/wireless/ipw2x00/Kconfig"
 source "drivers/net/wireless/iwlwifi/Kconfig"
+source "drivers/net/wireless/iwlegacy/Kconfig"
 source "drivers/net/wireless/iwmc3200wifi/Kconfig"
 source "drivers/net/wireless/libertas/Kconfig"
 source "drivers/net/wireless/orinoco/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 9760561a27a5..cd0c7e2aed43 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -41,7 +41,8 @@ obj-$(CONFIG_ADM8211) += adm8211.o
 
 obj-$(CONFIG_MWL8K)	+= mwl8k.o
 
-obj-$(CONFIG_IWLWIFI)	+= iwlwifi/
+obj-$(CONFIG_IWLAGN)	+= iwlwifi/
+obj-$(CONFIG_IWLWIFI_LEGACY)	+= iwlegacy/
 obj-$(CONFIG_RT2X00)	+= rt2x00/
 
 obj-$(CONFIG_P54_COMMON)	+= p54/
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
new file mode 100644
index 000000000000..2a45dd44cc12
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -0,0 +1,116 @@
+config IWLWIFI_LEGACY
+	tristate "Intel Wireless Wifi legacy devices"
+	depends on PCI && MAC80211
+	select FW_LOADER
+	select NEW_LEDS
+	select LEDS_CLASS
+	select LEDS_TRIGGERS
+	select MAC80211_LEDS
+
+menu "Debugging Options"
+	depends on IWLWIFI_LEGACY
+
+config IWLWIFI_LEGACY_DEBUG
+	bool "Enable full debugging output in 4965 and 3945 drivers"
+	depends on IWLWIFI_LEGACY
+	---help---
+	  This option will enable debug tracing output for the iwlwifilegacy
+	  drivers.
+
+	  This will result in the kernel module being ~100k larger. You can
+	  control which debug output is sent to the kernel log by setting the
+	  value in
+
+		/sys/class/net/wlan0/device/debug_level
+
+	  This entry will only exist if this option is enabled.
+
+	  To set a value, simply echo an 8-byte hex value to the same file:
+
+		  % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+
+	  You can find the list of debug mask values in:
+	  drivers/net/wireless/iwlwifilegacy/iwl-debug.h
+
+	  If this is your first time using this driver, you should say Y here
+	  as the debug information can assist others in helping you resolve
+	  any problems you may encounter.
+
+config IWLWIFI_LEGACY_DEBUGFS
+	bool "4965 and 3945 debugfs support"
+	depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+	---help---
+	  Enable creation of debugfs files for the iwlwifilegacy drivers. This
+	  is a low-impact option that allows getting insight into the
+	  driver's state at runtime.
+
+config IWLWIFI_LEGACY_DEVICE_TRACING
+	bool "iwlwifilegacy legacy device access tracing"
+	depends on IWLWIFI_LEGACY
+	depends on EVENT_TRACING
+	help
+	  Say Y here to trace all commands, including TX frames and IO
+	  accesses, sent to the device. If you say yes, iwlwifilegacy will
+	  register with the ftrace framework for event tracing and dump
+	  all this information to the ringbuffer, you may need to
+	  increase the ringbuffer size. See the ftrace documentation
+	  for more information.
+
+	  When tracing is not enabled, this option still has some
+	  (though rather small) overhead.
+
+	  If unsure, say Y so we can help you better when problems
+	  occur.
+endmenu
+
+config IWL4965
+	tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
+	depends on IWLWIFI_LEGACY
+	---help---
+	  This option enables support for
+
+	  Select to build the driver supporting the:
+
+		Intel Wireless WiFi Link 4965AGN
+
+	  This driver uses the kernel's mac80211 subsystem.
+
+	  In order to use this driver, you will need a microcode (uCode)
+	  image for it. You can obtain the microcode from:
+
+		<http://intellinuxwireless.org/>.
+
+	  The microcode is typically installed in /lib/firmware. You can
+	  look in the hotplug script /etc/hotplug/firmware.agent to
+	  determine which directory FIRMWARE_DIR is set to when the script
+	  runs.
+
+	  If you want to compile the driver as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want),
+	  say M here and read <file:Documentation/kbuild/modules.txt>. The
+	  module will be called iwl4965.
+
+config IWL3945
+	tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
+	depends on IWLWIFI_LEGACY
+	---help---
+	  Select to build the driver supporting the:
+
+		Intel PRO/Wireless 3945ABG/BG Network Connection
+
+	  This driver uses the kernel's mac80211 subsystem.
+
+	  In order to use this driver, you will need a microcode (uCode)
+	  image for it. You can obtain the microcode from:
+
+		<http://intellinuxwireless.org/>.
+
+	  The microcode is typically installed in /lib/firmware. You can
+	  look in the hotplug script /etc/hotplug/firmware.agent to
+	  determine which directory FIRMWARE_DIR is set to when the script
+	  runs.
+
+	  If you want to compile the driver as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want),
+	  say M here and read <file:Documentation/kbuild/modules.txt>. The
+	  module will be called iwl3945.
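
The debug_level file described in the IWLWIFI_LEGACY_DEBUG help text above is a runtime bitmask layered on top of the compile-time switch. A minimal sketch of that pattern, with a macro and category name chosen here purely for illustration (the driver's real macros and IWL_DL_* bits live in iwl-debug.h):

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Compiled in: print only when the selected category bit is set in the
 * runtime debug_level mask written via sysfs. */
#define EXAMPLE_IWL_DEBUG(debug_level, category, fmt, args...)		\
	do {								\
		if ((debug_level) & (category))				\
			printk(KERN_DEBUG fmt, ##args);			\
	} while (0)
#else
/* Compiled out: no code and no format strings end up in the module. */
#define EXAMPLE_IWL_DEBUG(debug_level, category, fmt, args...)		\
	do { } while (0)
#endif

This is also where the "~100k larger" note above comes from: with the option off, every call site collapses to nothing.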
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
new file mode 100644
index 000000000000..d56aeb38c211
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -0,0 +1,25 @@
+obj-$(CONFIG_IWLWIFI_LEGACY)	+= iwl-legacy.o
+iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
+iwl-legacy-objs += iwl-scan.o iwl-led.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+
+iwl-legacy-objs += $(iwl-legacy-m)
+
+CFLAGS_iwl-devtrace.o := -I$(src)
+
+# 4965
+obj-$(CONFIG_IWL4965)	+= iwl4965.o
+iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
+iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
+iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
+iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
+iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+
+# 3945
+obj-$(CONFIG_IWL3945)	+= iwl3945.o
+iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+
+ccflags-y += -D__CHECK_ENDIAN__
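
This Makefile is what makes iwl-legacy.ko a separate module that both iwl3945.ko and iwl4965.ko resolve symbols from at load time. A minimal sketch of how a shared module exposes a helper to the two device modules; the function name is a placeholder, the real shared code being the iwl-core.o, iwl-rx.o, etc. objects listed above:

#include <linux/module.h>

/* Placeholder standing in for the shared iwl_legacy_* routines.
 * EXPORT_SYMBOL makes it resolvable by iwl3945.ko and iwl4965.ko
 * once iwl-legacy.ko is loaded. */
int example_iwl_legacy_helper(int value)
{
	return value + 1;
}
EXPORT_SYMBOL(example_iwl_legacy_helper);

MODULE_DESCRIPTION("illustrative sketch only");
MODULE_LICENSE("GPL");

Because of this split, modprobe pulls in iwl-legacy.ko automatically whenever either device module is requested.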
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
index ef0835b01b6b..cfabb38793ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -60,12 +60,13 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 + 60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400; 61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
62 ssize_t ret; 62 ssize_t ret;
63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; 63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
64 *max_ofdm;
64 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; 65 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
65 struct iwl39_statistics_rx_non_phy *general, *accum_general; 66 struct iwl39_statistics_rx_non_phy *general, *accum_general;
66 struct iwl39_statistics_rx_non_phy *delta_general, *max_general; 67 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
67 68
68 if (!iwl_is_alive(priv)) 69 if (!iwl_legacy_is_alive(priv))
69 return -EAGAIN; 70 return -EAGAIN;
70 71
71 buf = kzalloc(bufsz, GFP_KERNEL); 72 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -335,7 +336,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
335 ssize_t ret; 336 ssize_t ret;
336 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; 337 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
337 338
338 if (!iwl_is_alive(priv)) 339 if (!iwl_legacy_is_alive(priv))
339 return -EAGAIN; 340 return -EAGAIN;
340 341
341 buf = kzalloc(bufsz, GFP_KERNEL); 342 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -434,7 +435,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file,
434 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; 435 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
435 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div; 436 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
436 437
437 if (!iwl_is_alive(priv)) 438 if (!iwl_legacy_is_alive(priv))
438 return -EAGAIN; 439 return -EAGAIN;
439 440
440 buf = kzalloc(bufsz, GFP_KERNEL); 441 buf = kzalloc(bufsz, GFP_KERNEL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
index 70809c53c215..8fef4b32b447 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30#include "iwl-core.h" 30#include "iwl-core.h"
31#include "iwl-debug.h" 31#include "iwl-debug.h"
32 32
33#ifdef CONFIG_IWLWIFI_DEBUGFS 33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, 34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos); 35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, 36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
index 2c9ed2b502a3..836c9919f82e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -185,4 +185,3 @@ struct iwl3945_tfd {
185 185
186 186
187#endif /* __iwl_3945_fh_h__ */ 187#endif /* __iwl_3945_fh_h__ */
188
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 65b5834da28c..779d3cb86e2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -164,12 +164,11 @@ struct iwl3945_eeprom {
164/* 164/*
165 * Per-channel regulatory data. 165 * Per-channel regulatory data.
166 * 166 *
167 * Each channel that *might* be supported by 3945 or 4965 has a fixed location 167 * Each channel that *might* be supported by 3945 has a fixed location
168 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory 168 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
169 * txpower (MSB). 169 * txpower (MSB).
170 * 170 *
171 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) 171 * Entries immediately below are for 20 MHz channel width.
172 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
173 * 172 *
174 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 173 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
175 */ 174 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
index dc7c3a4167a9..abd923558d48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -56,7 +56,7 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv,
56 .callback = NULL, 56 .callback = NULL,
57 }; 57 };
58 58
59 return iwl_send_cmd(priv, &cmd); 59 return iwl_legacy_send_cmd(priv, &cmd);
60} 60}
61 61
62const struct iwl_led_ops iwl3945_led_ops = { 62const struct iwl_led_ops iwl3945_led_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
index ce990adc51e7..96716276eb0d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 1f3e7e34fbc7..4fabc5439858 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -89,7 +89,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
89}; 89};
90 90
91#define IWL_RATE_MAX_WINDOW 62 91#define IWL_RATE_MAX_WINDOW 62
92#define IWL_RATE_FLUSH (3*HZ) 92#define IWL_RATE_FLUSH (3*HZ)
93#define IWL_RATE_WIN_FLUSH (HZ/2) 93#define IWL_RATE_WIN_FLUSH (HZ/2)
94#define IWL39_RATE_HIGH_TH 11520 94#define IWL39_RATE_HIGH_TH 11520
95#define IWL_SUCCESS_UP_TH 8960 95#define IWL_SUCCESS_UP_TH 8960
@@ -394,18 +394,18 @@ out:
394 IWL_DEBUG_INFO(priv, "leave\n"); 394 IWL_DEBUG_INFO(priv, "leave\n");
395} 395}
396 396
397static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 397static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
398{ 398{
399 return hw->priv; 399 return hw->priv;
400} 400}
401 401
402/* rate scale requires free function to be implemented */ 402/* rate scale requires free function to be implemented */
403static void rs_free(void *priv) 403static void iwl3945_rs_free(void *priv)
404{ 404{
405 return; 405 return;
406} 406}
407 407
408static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) 408static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
409{ 409{
410 struct iwl3945_rs_sta *rs_sta; 410 struct iwl3945_rs_sta *rs_sta;
411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
@@ -423,7 +423,7 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
423 return rs_sta; 423 return rs_sta;
424} 424}
425 425
426static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, 426static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
427 void *priv_sta) 427 void *priv_sta)
428{ 428{
429 struct iwl3945_rs_sta *rs_sta = priv_sta; 429 struct iwl3945_rs_sta *rs_sta = priv_sta;
@@ -438,12 +438,12 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
438 438
439 439
440/** 440/**
441 * rs_tx_status - Update rate control values based on Tx results 441 * iwl3945_rs_tx_status - Update rate control values based on Tx results
442 * 442 *
443 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by 443 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
444 * the hardware for each rate. 444 * the hardware for each rate.
445 */ 445 */
446static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, 446static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
447 struct ieee80211_sta *sta, void *priv_sta, 447 struct ieee80211_sta *sta, void *priv_sta,
448 struct sk_buff *skb) 448 struct sk_buff *skb)
449{ 449{
@@ -612,7 +612,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
612} 612}
613 613
614/** 614/**
615 * rs_get_rate - find the rate for the requested packet 615 * iwl3945_rs_get_rate - find the rate for the requested packet
616 * 616 *
617 * Returns the ieee80211_rate structure allocated by the driver. 617 * Returns the ieee80211_rate structure allocated by the driver.
618 * 618 *
@@ -627,7 +627,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
627 * rate table and must reference the driver allocated rate table 627 * rate table and must reference the driver allocated rate table
628 * 628 *
629 */ 629 */
630static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, 630static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
631 void *priv_sta, struct ieee80211_tx_rate_control *txrc) 631 void *priv_sta, struct ieee80211_tx_rate_control *txrc)
632{ 632{
633 struct ieee80211_supported_band *sband = txrc->sband; 633 struct ieee80211_supported_band *sband = txrc->sband;
@@ -899,7 +899,8 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
899 * the station is added. Since mac80211 calls this function before a 899 * the station is added. Since mac80211 calls this function before a
900 * station is added we ignore it. 900 * station is added we ignore it.
901 */ 901 */
902static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, 902static void iwl3945_rs_rate_init_stub(void *priv_r,
903 struct ieee80211_supported_band *sband,
903 struct ieee80211_sta *sta, void *priv_sta) 904 struct ieee80211_sta *sta, void *priv_sta)
904{ 905{
905} 906}
@@ -907,13 +908,13 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
907static struct rate_control_ops rs_ops = { 908static struct rate_control_ops rs_ops = {
908 .module = NULL, 909 .module = NULL,
909 .name = RS_NAME, 910 .name = RS_NAME,
910 .tx_status = rs_tx_status, 911 .tx_status = iwl3945_rs_tx_status,
911 .get_rate = rs_get_rate, 912 .get_rate = iwl3945_rs_get_rate,
912 .rate_init = rs_rate_init_stub, 913 .rate_init = iwl3945_rs_rate_init_stub,
913 .alloc = rs_alloc, 914 .alloc = iwl3945_rs_alloc,
914 .free = rs_free, 915 .free = iwl3945_rs_free,
915 .alloc_sta = rs_alloc_sta, 916 .alloc_sta = iwl3945_rs_alloc_sta,
916 .free_sta = rs_free_sta, 917 .free_sta = iwl3945_rs_free_sta,
917#ifdef CONFIG_MAC80211_DEBUGFS 918#ifdef CONFIG_MAC80211_DEBUGFS
918 .add_sta_debugfs = iwl3945_add_debugfs, 919 .add_sta_debugfs = iwl3945_add_debugfs,
919 .remove_sta_debugfs = iwl3945_remove_debugfs, 920 .remove_sta_debugfs = iwl3945_remove_debugfs,
@@ -991,5 +992,3 @@ void iwl3945_rate_control_unregister(void)
991{ 992{
992 ieee80211_rate_control_unregister(&rs_ops); 993 ieee80211_rate_control_unregister(&rs_ops);
993} 994}
994
995
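
The rs_ops table above, now filled with the renamed iwl3945_rs_* callbacks, is handed to mac80211 by a registration counterpart of the iwl3945_rate_control_unregister() shown at the end of this file. A sketch of that side, using the mac80211 ieee80211_rate_control_register() API (shown as an illustration of the pairing, not as the exact body in this patch):

/* Sketch: register the 3945 rate-scaling ops with mac80211.  rs_ops is the
 * static table whose callbacks were renamed in the hunk above. */
int iwl3945_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_ops);
}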
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index 5b6932c2193a..8359594839e2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -51,7 +51,6 @@
51#include "iwl-led.h" 51#include "iwl-led.h"
52#include "iwl-3945-led.h" 52#include "iwl-3945-led.h"
53#include "iwl-3945-debugfs.h" 53#include "iwl-3945-debugfs.h"
54#include "iwl-legacy.h"
55 54
56#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 55#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
57 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 56 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -172,14 +171,14 @@ void iwl3945_disable_events(struct iwl_priv *priv)
172 return; 171 return;
173 } 172 }
174 173
175 disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32))); 174 disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
176 array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32))); 175 array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
177 176
178 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { 177 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
179 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n", 178 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
180 disable_ptr); 179 disable_ptr);
181 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) 180 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
182 iwl_write_targ_mem(priv, 181 iwl_legacy_write_targ_mem(priv,
183 disable_ptr + (i * sizeof(u32)), 182 disable_ptr + (i * sizeof(u32)),
184 evt_disable[i]); 183 evt_disable[i]);
185 184
@@ -202,7 +201,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
202 return -1; 201 return -1;
203} 202}
204 203
205#ifdef CONFIG_IWLWIFI_DEBUG 204#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
206#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x 205#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
207 206
208static const char *iwl3945_get_tx_fail_reason(u32 status) 207static const char *iwl3945_get_tx_fail_reason(u32 status)
@@ -255,7 +254,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
255 break; 254 break;
256 case IEEE80211_BAND_2GHZ: 255 case IEEE80211_BAND_2GHZ:
257 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 256 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
258 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 257 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
259 if (rate == IWL_RATE_11M_INDEX) 258 if (rate == IWL_RATE_11M_INDEX)
260 next_rate = IWL_RATE_5M_INDEX; 259 next_rate = IWL_RATE_5M_INDEX;
261 } 260 }
@@ -285,8 +284,9 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
285 284
286 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM); 285 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
287 286
288 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; 287 for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
289 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 288 q->read_ptr != index;
289 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
290 290
291 tx_info = &txq->txb[txq->q.read_ptr]; 291 tx_info = &txq->txb[txq->q.read_ptr];
292 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); 292 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
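
The reclaim loop above walks the TX queue's circular buffer with iwl_legacy_queue_inc_wrap(). A minimal sketch of what such a wrap helper looks like, assuming (as these hardware queues do) that n_bd, the number of buffer descriptors, is a power of two:

/* Advance a queue index by one slot, wrapping at n_bd.  The power-of-two
 * assumption lets the wrap be a mask instead of a modulo. */
static inline int example_queue_inc_wrap(int index, int n_bd)
{
	return ++index & (n_bd - 1);
}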
@@ -294,10 +294,10 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
294 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 294 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
295 } 295 }
296 296
297 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && 297 if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
298 (txq_id != IWL39_CMD_QUEUE_NUM) && 298 (txq_id != IWL39_CMD_QUEUE_NUM) &&
299 priv->mac80211_registered) 299 priv->mac80211_registered)
300 iwl_wake_queue(priv, txq); 300 iwl_legacy_wake_queue(priv, txq);
301} 301}
302 302
303/** 303/**
@@ -317,7 +317,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
317 int rate_idx; 317 int rate_idx;
318 int fail; 318 int fail;
319 319
320 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 320 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
321 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " 321 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
322 "is out of range [0-%d] %d %d\n", txq_id, 322 "is out of range [0-%d] %d %d\n", txq_id,
323 index, txq->q.n_bd, txq->q.write_ptr, 323 index, txq->q.n_bd, txq->q.write_ptr,
@@ -363,12 +363,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
363 * RX handler implementations 363 * RX handler implementations
364 * 364 *
365 *****************************************************************************/ 365 *****************************************************************************/
366#ifdef CONFIG_IWLWIFI_DEBUGFS 366#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
367/*
368 * based on the assumption of all statistics counter are in DWORD
369 * FIXME: This function is for debugging, do not deal with
370 * the case of counters roll-over.
371 */
372static void iwl3945_accumulative_statistics(struct iwl_priv *priv, 367static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
373 __le32 *stats) 368 __le32 *stats)
374{ 369{
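
iwl3945_accumulative_statistics(), whose debugfs guard changes in this hunk, treats the firmware statistics block as an array of little-endian 32-bit counters. A simplified sketch of that accumulation idea; the real function also tracks per-interval deltas and maxima, and, as the removed comment noted, neither version deals with counter roll-over:

/* Add one statistics sample into a running total, word by word. */
static void example_accumulate_stats(const __le32 *sample, u32 *accum,
				     int n_words)
{
	int i;

	for (i = 0; i < n_words; i++)
		accum[i] += le32_to_cpu(sample[i]);
}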
@@ -402,72 +397,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
402} 397}
403#endif 398#endif
404 399
405/**
406 * iwl3945_good_plcp_health - checks for plcp error.
407 *
408 * When the plcp error is exceeding the thresholds, reset the radio
409 * to improve the throughput.
410 */
411static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
412 struct iwl_rx_packet *pkt)
413{
414 bool rc = true;
415 struct iwl3945_notif_statistics current_stat;
416 int combined_plcp_delta;
417 unsigned int plcp_msec;
418 unsigned long plcp_received_jiffies;
419
420 if (priv->cfg->base_params->plcp_delta_threshold ==
421 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
422 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
423 return rc;
424 }
425 memcpy(&current_stat, pkt->u.raw, sizeof(struct
426 iwl3945_notif_statistics));
427 /*
428 * check for plcp_err and trigger radio reset if it exceeds
429 * the plcp error threshold plcp_delta.
430 */
431 plcp_received_jiffies = jiffies;
432 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
433 (long) priv->plcp_jiffies);
434 priv->plcp_jiffies = plcp_received_jiffies;
435 /*
436 * check to make sure plcp_msec is not 0 to prevent division
437 * by zero.
438 */
439 if (plcp_msec) {
440 combined_plcp_delta =
441 (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
442 le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
443
444 if ((combined_plcp_delta > 0) &&
445 ((combined_plcp_delta * 100) / plcp_msec) >
446 priv->cfg->base_params->plcp_delta_threshold) {
447 /*
448 * if plcp_err exceed the threshold, the following
449 * data is printed in csv format:
450 * Text: plcp_err exceeded %d,
451 * Received ofdm.plcp_err,
452 * Current ofdm.plcp_err,
453 * combined_plcp_delta,
454 * plcp_msec
455 */
456 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
457 "%u, %d, %u mSecs\n",
458 priv->cfg->base_params->plcp_delta_threshold,
459 le32_to_cpu(current_stat.rx.ofdm.plcp_err),
460 combined_plcp_delta, plcp_msec);
461 /*
462 * Reset the RF radio due to the high plcp
463 * error rate
464 */
465 rc = false;
466 }
467 }
468 return rc;
469}
470
471void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 400void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
472 struct iwl_rx_mem_buffer *rxb) 401 struct iwl_rx_mem_buffer *rxb)
473{ 402{
@@ -476,10 +405,10 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
476 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 405 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
477 (int)sizeof(struct iwl3945_notif_statistics), 406 (int)sizeof(struct iwl3945_notif_statistics),
478 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 407 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
479#ifdef CONFIG_IWLWIFI_DEBUGFS 408#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
480 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); 409 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
481#endif 410#endif
482 iwl_recover_from_statistics(priv, pkt); 411 iwl_legacy_recover_from_statistics(priv, pkt);
483 412
484 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics)); 413 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
485} 414}
@@ -491,7 +420,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
491 __le32 *flag = (__le32 *)&pkt->u.raw; 420 __le32 *flag = (__le32 *)&pkt->u.raw;
492 421
493 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { 422 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
494#ifdef CONFIG_IWLWIFI_DEBUGFS 423#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
495 memset(&priv->_3945.accum_statistics, 0, 424 memset(&priv->_3945.accum_statistics, 0,
496 sizeof(struct iwl3945_notif_statistics)); 425 sizeof(struct iwl3945_notif_statistics));
497 memset(&priv->_3945.delta_statistics, 0, 426 memset(&priv->_3945.delta_statistics, 0,
@@ -562,14 +491,14 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
562 } 491 }
563 492
564 if (!iwl3945_mod_params.sw_crypto) 493 if (!iwl3945_mod_params.sw_crypto)
565 iwl_set_decrypted_flag(priv, 494 iwl_legacy_set_decrypted_flag(priv,
566 (struct ieee80211_hdr *)rxb_addr(rxb), 495 (struct ieee80211_hdr *)rxb_addr(rxb),
567 le32_to_cpu(rx_end->status), stats); 496 le32_to_cpu(rx_end->status), stats);
568 497
569 skb_add_rx_frag(skb, 0, rxb->page, 498 skb_add_rx_frag(skb, 0, rxb->page,
570 (void *)rx_hdr->payload - (void *)pkt, len); 499 (void *)rx_hdr->payload - (void *)pkt, len);
571 500
572 iwl_update_stats(priv, false, fc, len); 501 iwl_legacy_update_stats(priv, false, fc, len);
573 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 502 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
574 503
575 ieee80211_rx(priv->hw, skb); 504 ieee80211_rx(priv->hw, skb);
@@ -642,7 +571,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
642 rx_status.signal, rx_status.signal, 571 rx_status.signal, rx_status.signal,
643 rx_status.rate_idx); 572 rx_status.rate_idx);
644 573
645 iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); 574 iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
575 header);
646 576
647 if (network_packet) { 577 if (network_packet) {
648 priv->_3945.last_beacon_time = 578 priv->_3945.last_beacon_time =
@@ -810,7 +740,7 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
810 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; 740 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
811 station->sta.rate_n_flags = cpu_to_le16(tx_rate); 741 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
812 station->sta.mode = STA_CONTROL_MODIFY_MSK; 742 station->sta.mode = STA_CONTROL_MODIFY_MSK;
813 iwl_send_add_sta(priv, &station->sta, CMD_ASYNC); 743 iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
814 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 744 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
815 745
816 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", 746 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
@@ -825,7 +755,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
825 * to set power to V_AUX, do 755 * to set power to V_AUX, do
826 756
827 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { 757 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
828 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 758 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
829 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 759 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
830 ~APMG_PS_CTRL_MSK_PWR_SRC); 760 ~APMG_PS_CTRL_MSK_PWR_SRC);
831 761
@@ -835,7 +765,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
835 } 765 }
836 */ 766 */
837 767
838 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 768 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
839 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 769 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
840 ~APMG_PS_CTRL_MSK_PWR_SRC); 770 ~APMG_PS_CTRL_MSK_PWR_SRC);
841 771
@@ -845,10 +775,11 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
845 775
846static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 776static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
847{ 777{
848 iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); 778 iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
849 iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); 779 iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
850 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0); 780 rxq->rb_stts_dma);
851 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 781 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
782 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
852 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | 783 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
853 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | 784 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
854 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | 785 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
@@ -859,7 +790,7 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
859 FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); 790 FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
860 791
861 /* fake read to flush all prev I/O */ 792 /* fake read to flush all prev I/O */
862 iwl_read_direct32(priv, FH39_RSSR_CTRL); 793 iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
863 794
864 return 0; 795 return 0;
865} 796}
@@ -868,23 +799,23 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
868{ 799{
869 800
870 /* bypass mode */ 801 /* bypass mode */
871 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2); 802 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
872 803
873 /* RA 0 is active */ 804 /* RA 0 is active */
874 iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); 805 iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
875 806
876 /* all 6 fifo are active */ 807 /* all 6 fifo are active */
877 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); 808 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
878 809
879 iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); 810 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
880 iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); 811 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
881 iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); 812 iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
882 iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); 813 iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
883 814
884 iwl_write_direct32(priv, FH39_TSSR_CBB_BASE, 815 iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
885 priv->_3945.shared_phys); 816 priv->_3945.shared_phys);
886 817
887 iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG, 818 iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
888 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | 819 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
889 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | 820 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
890 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | 821 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
@@ -910,7 +841,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
910 iwl3945_hw_txq_ctx_free(priv); 841 iwl3945_hw_txq_ctx_free(priv);
911 842
912 /* allocate tx queue structure */ 843 /* allocate tx queue structure */
913 rc = iwl_alloc_txq_mem(priv); 844 rc = iwl_legacy_alloc_txq_mem(priv);
914 if (rc) 845 if (rc)
915 return rc; 846 return rc;
916 847
@@ -923,8 +854,8 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
923 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 854 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
924 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ? 855 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
925 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 856 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
926 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 857 rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
927 txq_id); 858 slots_num, txq_id);
928 if (rc) { 859 if (rc) {
929 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); 860 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
930 goto error; 861 goto error;
@@ -941,21 +872,23 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
941 872
942/* 873/*
943 * Start up 3945's basic functionality after it has been reset 874 * Start up 3945's basic functionality after it has been reset
944 * (e.g. after platform boot, or shutdown via iwl_apm_stop()) 875 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
945 * NOTE: This does not load uCode nor start the embedded processor 876 * NOTE: This does not load uCode nor start the embedded processor
946 */ 877 */
947static int iwl3945_apm_init(struct iwl_priv *priv) 878static int iwl3945_apm_init(struct iwl_priv *priv)
948{ 879{
949 int ret = iwl_apm_init(priv); 880 int ret = iwl_legacy_apm_init(priv);
950 881
951 /* Clear APMG (NIC's internal power management) interrupts */ 882 /* Clear APMG (NIC's internal power management) interrupts */
952 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); 883 iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
953 iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); 884 iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
954 885
955 /* Reset radio chip */ 886 /* Reset radio chip */
956 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); 887 iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
888 APMG_PS_CTRL_VAL_RESET_REQ);
957 udelay(5); 889 udelay(5);
958 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); 890 iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
891 APMG_PS_CTRL_VAL_RESET_REQ);
959 892
960 return ret; 893 return ret;
961} 894}
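
The radio reset above is a set-bit, delay, clear-bit sequence on an APMG periphery register. A sketch of the read-modify-write pattern such set/clear helpers follow, assuming a matching iwl_legacy_read_prph() accessor exists and leaving out the NIC-access locking the real helpers need:

/* Illustrative only: OR a mask into a periphery register, or clear it. */
static void example_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
{
	iwl_legacy_write_prph(priv, reg, iwl_legacy_read_prph(priv, reg) | mask);
}

static void example_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
{
	iwl_legacy_write_prph(priv, reg, iwl_legacy_read_prph(priv, reg) & ~mask);
}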
@@ -977,17 +910,17 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
977 IWL_DEBUG_INFO(priv, "RTP type\n"); 910 IWL_DEBUG_INFO(priv, "RTP type\n");
978 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { 911 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
979 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); 912 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
980 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 913 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
981 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); 914 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
982 } else { 915 } else {
983 IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n"); 916 IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
984 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 917 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
985 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); 918 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
986 } 919 }
987 920
988 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { 921 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
989 IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n"); 922 IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
990 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 923 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
991 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); 924 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
992 } else 925 } else
993 IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n"); 926 IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
@@ -995,24 +928,24 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
995 if ((eeprom->board_revision & 0xF0) == 0xD0) { 928 if ((eeprom->board_revision & 0xF0) == 0xD0) {
996 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", 929 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
997 eeprom->board_revision); 930 eeprom->board_revision);
998 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 931 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
999 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 932 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
1000 } else { 933 } else {
1001 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", 934 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
1002 eeprom->board_revision); 935 eeprom->board_revision);
1003 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, 936 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
1004 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 937 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
1005 } 938 }
1006 939
1007 if (eeprom->almgor_m_version <= 1) { 940 if (eeprom->almgor_m_version <= 1) {
1008 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 941 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1009 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); 942 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
1010 IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n", 943 IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
1011 eeprom->almgor_m_version); 944 eeprom->almgor_m_version);
1012 } else { 945 } else {
1013 IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n", 946 IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
1014 eeprom->almgor_m_version); 947 eeprom->almgor_m_version);
1015 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 948 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1016 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); 949 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
1017 } 950 }
1018 spin_unlock_irqrestore(&priv->lock, flags); 951 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1040,7 +973,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
1040 973
1041 /* Allocate the RX queue, or reset if it is already allocated */ 974 /* Allocate the RX queue, or reset if it is already allocated */
1042 if (!rxq->bd) { 975 if (!rxq->bd) {
1043 rc = iwl_rx_queue_alloc(priv); 976 rc = iwl_legacy_rx_queue_alloc(priv);
1044 if (rc) { 977 if (rc) {
1045 IWL_ERR(priv, "Unable to initialize Rx queue\n"); 978 IWL_ERR(priv, "Unable to initialize Rx queue\n");
1046 return -ENOMEM; 979 return -ENOMEM;
@@ -1055,10 +988,10 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
1055 988
1056 /* Look at using this instead: 989 /* Look at using this instead:
1057 rxq->need_update = 1; 990 rxq->need_update = 1;
1058 iwl_rx_queue_update_write_ptr(priv, rxq); 991 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1059 */ 992 */
1060 993
1061 iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); 994 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
1062 995
1063 rc = iwl3945_txq_ctx_reset(priv); 996 rc = iwl3945_txq_ctx_reset(priv);
1064 if (rc) 997 if (rc)
@@ -1083,12 +1016,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1083 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 1016 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1084 txq_id++) 1017 txq_id++)
1085 if (txq_id == IWL39_CMD_QUEUE_NUM) 1018 if (txq_id == IWL39_CMD_QUEUE_NUM)
1086 iwl_cmd_queue_free(priv); 1019 iwl_legacy_cmd_queue_free(priv);
1087 else 1020 else
1088 iwl_tx_queue_free(priv, txq_id); 1021 iwl_legacy_tx_queue_free(priv, txq_id);
1089 1022
1090 /* free tx queue structure */ 1023 /* free tx queue structure */
1091 iwl_free_txq_mem(priv); 1024 iwl_legacy_txq_mem(priv);
1092} 1025}
1093 1026
1094void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) 1027void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1096,12 +1029,12 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1096 int txq_id; 1029 int txq_id;
1097 1030
1098 /* stop SCD */ 1031 /* stop SCD */
1099 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); 1032 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
1100 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0); 1033 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
1101 1034
1102 /* reset TFD queues */ 1035 /* reset TFD queues */
1103 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 1036 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
1104 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); 1037 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
1105 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, 1038 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
1106 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), 1039 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1107 1000); 1040 1000);
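
The queue shutdown above waits with iwl_poll_direct_bit() for each DMA channel to report idle, with a 1000 microsecond budget. A simplified sketch of that poll-until-set-or-timeout idea; the real helper's exact signature and return convention may differ:

/* Poll a direct-access register until all bits in 'mask' read back as set,
 * or 'timeout_us' microseconds have elapsed. */
static int example_poll_direct_bit(struct iwl_priv *priv, u32 addr,
				   u32 mask, int timeout_us)
{
	int elapsed = 0;

	do {
		if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
			return elapsed;
		udelay(10);
		elapsed += 10;
	} while (elapsed < timeout_us);

	return -ETIMEDOUT;
}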
@@ -1168,12 +1101,12 @@ static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
1168#define IWL_TEMPERATURE_LIMIT_TIMER 6 1101#define IWL_TEMPERATURE_LIMIT_TIMER 6
1169 1102
1170/** 1103/**
1171 * is_temp_calib_needed - determines if new calibration is needed 1104 * iwl3945_is_temp_calib_needed - determines if new calibration is needed
1172 * 1105 *
1173 * records new temperature in tx_mgr->temperature. 1106 * records new temperature in tx_mgr->temperature.
1174 * replaces tx_mgr->last_temperature *only* if calib needed 1107 * replaces tx_mgr->last_temperature *only* if calib needed
1175 * (assumes caller will actually do the calibration!). */ 1108 * (assumes caller will actually do the calibration!). */
1176static int is_temp_calib_needed(struct iwl_priv *priv) 1109static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
1177{ 1110{
1178 int temp_diff; 1111 int temp_diff;
1179 1112
@@ -1404,9 +1337,6 @@ static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_in
1404 * based on eeprom channel data) for this channel. */ 1337 * based on eeprom channel data) for this channel. */
1405 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]); 1338 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
1406 1339
1407 /* further limit to user's max power preference.
1408 * FIXME: Other spectrum management power limitations do not
1409 * seem to apply?? */
1410 power = min(power, priv->tx_power_user_lmt); 1340 power = min(power, priv->tx_power_user_lmt);
1411 scan_power_info->requested_power = power; 1341 scan_power_info->requested_power = power;
1412 1342
@@ -1460,7 +1390,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1460 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel); 1390 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1461 1391
1462 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; 1392 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1463 ch_info = iwl_get_channel_info(priv, priv->band, chan); 1393 ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
1464 if (!ch_info) { 1394 if (!ch_info) {
1465 IWL_ERR(priv, 1395 IWL_ERR(priv,
1466 "Failed to get channel info for channel %d [%d]\n", 1396 "Failed to get channel info for channel %d [%d]\n",
@@ -1468,7 +1398,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1468 return -EINVAL; 1398 return -EINVAL;
1469 } 1399 }
1470 1400
1471 if (!is_channel_valid(ch_info)) { 1401 if (!iwl_legacy_is_channel_valid(ch_info)) {
1472 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on " 1402 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
1473 "non-Tx channel.\n"); 1403 "non-Tx channel.\n");
1474 return 0; 1404 return 0;
@@ -1503,7 +1433,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1503 txpower.power[i].rate); 1433 txpower.power[i].rate);
1504 } 1434 }
1505 1435
1506 return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, 1436 return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
1507 sizeof(struct iwl3945_txpowertable_cmd), 1437 sizeof(struct iwl3945_txpowertable_cmd),
1508 &txpower); 1438 &txpower);
1509 1439
@@ -1637,7 +1567,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1637 /* set up new Tx power info for each and every channel, 2.4 and 5.x */ 1567 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1638 for (i = 0; i < priv->channel_count; i++) { 1568 for (i = 0; i < priv->channel_count; i++) {
1639 ch_info = &priv->channel_info[i]; 1569 ch_info = &priv->channel_info[i];
1640 a_band = is_channel_a_band(ch_info); 1570 a_band = iwl_legacy_is_channel_a_band(ch_info);
1641 1571
1642 /* Get this chnlgrp's factory calibration temperature */ 1572 /* Get this chnlgrp's factory calibration temperature */
1643 ref_temp = (s16)eeprom->groups[ch_info->group_index]. 1573 ref_temp = (s16)eeprom->groups[ch_info->group_index].
@@ -1703,7 +1633,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1703 1633
1704 for (i = 0; i < priv->channel_count; i++) { 1634 for (i = 0; i < priv->channel_count; i++) {
1705 ch_info = &priv->channel_info[i]; 1635 ch_info = &priv->channel_info[i];
1706 a_band = is_channel_a_band(ch_info); 1636 a_band = iwl_legacy_is_channel_a_band(ch_info);
1707 1637
1708 /* find minimum power of all user and regulatory constraints 1638 /* find minimum power of all user and regulatory constraints
1709 * (does not consider h/w clipping limitations) */ 1639 * (does not consider h/w clipping limitations) */
@@ -1719,7 +1649,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1719 1649
1720 /* update txpower settings for all channels, 1650 /* update txpower settings for all channels,
1721 * send to NIC if associated. */ 1651 * send to NIC if associated. */
1722 is_temp_calib_needed(priv); 1652 iwl3945_is_temp_calib_needed(priv);
1723 iwl3945_hw_reg_comp_txpower_temp(priv); 1653 iwl3945_hw_reg_comp_txpower_temp(priv);
1724 1654
1725 return 0; 1655 return 0;
@@ -1737,8 +1667,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1737 .flags = CMD_WANT_SKB, 1667 .flags = CMD_WANT_SKB,
1738 .data = &rxon_assoc, 1668 .data = &rxon_assoc,
1739 }; 1669 };
1740 const struct iwl_rxon_cmd *rxon1 = &ctx->staging; 1670 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1741 const struct iwl_rxon_cmd *rxon2 = &ctx->active; 1671 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1742 1672
1743 if ((rxon1->flags == rxon2->flags) && 1673 if ((rxon1->flags == rxon2->flags) &&
1744 (rxon1->filter_flags == rxon2->filter_flags) && 1674 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1754,7 +1684,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1754 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; 1684 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1755 rxon_assoc.reserved = 0; 1685 rxon_assoc.reserved = 0;
1756 1686
1757 rc = iwl_send_cmd_sync(priv, &cmd); 1687 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
1758 if (rc) 1688 if (rc)
1759 return rc; 1689 return rc;
1760 1690
@@ -1764,7 +1694,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1764 rc = -EIO; 1694 rc = -EIO;
1765 } 1695 }
1766 1696
1767 iwl_free_pages(priv, cmd.reply_page); 1697 iwl_legacy_free_pages(priv, cmd.reply_page);
1768 1698
1769 return rc; 1699 return rc;
1770} 1700}
@@ -1788,7 +1718,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1788 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1718 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1789 return -EINVAL; 1719 return -EINVAL;
1790 1720
1791 if (!iwl_is_alive(priv)) 1721 if (!iwl_legacy_is_alive(priv))
1792 return -1; 1722 return -1;
1793 1723
1794 /* always get timestamp with Rx frame */ 1724 /* always get timestamp with Rx frame */
@@ -1799,7 +1729,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1799 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); 1729 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1800 staging_rxon->flags |= iwl3945_get_antenna_flags(priv); 1730 staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
1801 1731
1802 rc = iwl_check_rxon_cmd(priv, ctx); 1732 rc = iwl_legacy_check_rxon_cmd(priv, ctx);
1803 if (rc) { 1733 if (rc) {
1804 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 1734 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1805 return -EINVAL; 1735 return -EINVAL;
@@ -1808,8 +1738,9 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1808 /* If we don't need to send a full RXON, we can use 1738 /* If we don't need to send a full RXON, we can use
1809 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter 1739 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
1810 * and other flags for the current radio configuration. */ 1740 * and other flags for the current radio configuration. */
1811 if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) { 1741 if (!iwl_legacy_full_rxon_required(priv,
1812 rc = iwl_send_rxon_assoc(priv, 1742 &priv->contexts[IWL_RXON_CTX_BSS])) {
1743 rc = iwl_legacy_send_rxon_assoc(priv,
1813 &priv->contexts[IWL_RXON_CTX_BSS]); 1744 &priv->contexts[IWL_RXON_CTX_BSS]);
1814 if (rc) { 1745 if (rc) {
1815 IWL_ERR(priv, "Error setting RXON_ASSOC " 1746 IWL_ERR(priv, "Error setting RXON_ASSOC "
@@ -1826,7 +1757,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1826 * an RXON_ASSOC and the new config wants the associated mask enabled, 1757 * an RXON_ASSOC and the new config wants the associated mask enabled,
1827 * we must clear the associated from the active configuration 1758 * we must clear the associated from the active configuration
1828 * before we apply the new config */ 1759 * before we apply the new config */
1829 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) { 1760 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
1830 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); 1761 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1831 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1762 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1832 1763
@@ -1836,7 +1767,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1836 */ 1767 */
1837 active_rxon->reserved4 = 0; 1768 active_rxon->reserved4 = 0;
1838 active_rxon->reserved5 = 0; 1769 active_rxon->reserved5 = 0;
1839 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1770 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1840 sizeof(struct iwl3945_rxon_cmd), 1771 sizeof(struct iwl3945_rxon_cmd),
1841 &priv->contexts[IWL_RXON_CTX_BSS].active); 1772 &priv->contexts[IWL_RXON_CTX_BSS].active);
1842 1773
@@ -1848,9 +1779,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1848 "configuration (%d).\n", rc); 1779 "configuration (%d).\n", rc);
1849 return rc; 1780 return rc;
1850 } 1781 }
1851 iwl_clear_ucode_stations(priv, 1782 iwl_legacy_clear_ucode_stations(priv,
1783 &priv->contexts[IWL_RXON_CTX_BSS]);
1784 iwl_legacy_restore_stations(priv,
1852 &priv->contexts[IWL_RXON_CTX_BSS]); 1785 &priv->contexts[IWL_RXON_CTX_BSS]);
1853 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
1854 } 1786 }
1855 1787
1856 IWL_DEBUG_INFO(priv, "Sending RXON\n" 1788 IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1868,10 +1800,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1868 staging_rxon->reserved4 = 0; 1800 staging_rxon->reserved4 = 0;
1869 staging_rxon->reserved5 = 0; 1801 staging_rxon->reserved5 = 0;
1870 1802
1871 iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto); 1803 iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
1872 1804
1873 /* Apply the new configuration */ 1805 /* Apply the new configuration */
1874 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1806 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1875 sizeof(struct iwl3945_rxon_cmd), 1807 sizeof(struct iwl3945_rxon_cmd),
1876 staging_rxon); 1808 staging_rxon);
1877 if (rc) { 1809 if (rc) {
@@ -1882,14 +1814,15 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1882 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); 1814 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1883 1815
1884 if (!new_assoc) { 1816 if (!new_assoc) {
1885 iwl_clear_ucode_stations(priv, 1817 iwl_legacy_clear_ucode_stations(priv,
1886 &priv->contexts[IWL_RXON_CTX_BSS]); 1818 &priv->contexts[IWL_RXON_CTX_BSS]);
1887 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]); 1819 iwl_legacy_restore_stations(priv,
1820 &priv->contexts[IWL_RXON_CTX_BSS]);
1888 } 1821 }
1889 1822
1890 /* If we issue a new RXON command which required a tune then we must 1823 /* If we issue a new RXON command which required a tune then we must
1891 * send a new TXPOWER command or we won't be able to Tx any frames */ 1824 * send a new TXPOWER command or we won't be able to Tx any frames */
1892 rc = iwl_set_tx_power(priv, priv->tx_power_next, true); 1825 rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1893 if (rc) { 1826 if (rc) {
1894 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); 1827 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
1895 return rc; 1828 return rc;
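Taken together, the hunks above keep the original commit flow and only switch it to the shared iwl_legacy_* helpers. As an outline (a paraphrase of the code shown above, not new driver logic):

/* outline of iwl3945_commit_rxon(), paraphrased from the hunks above */
if (!iwl_legacy_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) {
	/* only filter/flag changes: iwl_legacy_send_rxon_assoc() is enough,
	 * the active config is updated and no full RXON is sent */
}

if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
	/* drop the associated bit from the active config and push it, then
	 * clear the uCode station table and restore the driver's stations */
}

/* send the full staging RXON, mirror it into the active config, and
 * re-send TX power via iwl_legacy_set_tx_power() -- a retune invalidates
 * the previous TXPOWER setting */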
@@ -1919,7 +1852,7 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1919{ 1852{
1920 /* This will kick in the "brute force" 1853 /* This will kick in the "brute force"
1921 * iwl3945_hw_reg_comp_txpower_temp() below */ 1854 * iwl3945_hw_reg_comp_txpower_temp() below */
1922 if (!is_temp_calib_needed(priv)) 1855 if (!iwl3945_is_temp_calib_needed(priv))
1923 goto reschedule; 1856 goto reschedule;
1924 1857
1925 /* Set up a new set of temp-adjusted TxPowers, send to NIC. 1858 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
@@ -1966,7 +1899,7 @@ static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
1966 u8 grp_channel; 1899 u8 grp_channel;
1967 1900
1968 /* Find the group index for the channel ... don't use index 1(?) */ 1901 /* Find the group index for the channel ... don't use index 1(?) */
1969 if (is_channel_a_band(ch_info)) { 1902 if (iwl_legacy_is_channel_a_band(ch_info)) {
1970 for (group = 1; group < 5; group++) { 1903 for (group = 1; group < 5; group++) {
1971 grp_channel = ch_grp[group].group_channel; 1904 grp_channel = ch_grp[group].group_channel;
1972 if (ch_info->channel <= grp_channel) { 1905 if (ch_info->channel <= grp_channel) {
@@ -2146,8 +2079,8 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
2146 /* initialize Tx power info for each and every channel, 2.4 and 5.x */ 2079 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2147 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; 2080 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
2148 i++, ch_info++) { 2081 i++, ch_info++) {
2149 a_band = is_channel_a_band(ch_info); 2082 a_band = iwl_legacy_is_channel_a_band(ch_info);
2150 if (!is_channel_valid(ch_info)) 2083 if (!iwl_legacy_is_channel_valid(ch_info))
2151 continue; 2084 continue;
2152 2085
2153 /* find this channel's channel group (*not* "band") index */ 2086 /* find this channel's channel group (*not* "band") index */
@@ -2250,7 +2183,7 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
2250{ 2183{
2251 int rc; 2184 int rc;
2252 2185
2253 iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0); 2186 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
2254 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS, 2187 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
2255 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 2188 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2256 if (rc < 0) 2189 if (rc < 0)
@@ -2267,10 +2200,10 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2267 2200
2268 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); 2201 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
2269 2202
2270 iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0); 2203 iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
2271 iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0); 2204 iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
2272 2205
2273 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 2206 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
2274 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | 2207 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2275 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | 2208 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2276 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | 2209 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
@@ -2299,7 +2232,8 @@ static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
2299} 2232}
2300 2233
2301 2234
2302static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) 2235static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
2236 u8 *data)
2303{ 2237{
2304 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data; 2238 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
2305 addsta->mode = cmd->mode; 2239 addsta->mode = cmd->mode;
@@ -2327,7 +2261,7 @@ static int iwl3945_add_bssid_station(struct iwl_priv *priv,
2327 if (sta_id_r) 2261 if (sta_id_r)
2328 *sta_id_r = IWL_INVALID_STATION; 2262 *sta_id_r = IWL_INVALID_STATION;
2329 2263
2330 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); 2264 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
2331 if (ret) { 2265 if (ret) {
2332 IWL_ERR(priv, "Unable to add station %pM\n", addr); 2266 IWL_ERR(priv, "Unable to add station %pM\n", addr);
2333 return ret; 2267 return ret;
@@ -2362,7 +2296,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2362 return 0; 2296 return 0;
2363 } 2297 }
2364 2298
2365 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, 2299 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
2366 vif->bss_conf.bssid); 2300 vif->bss_conf.bssid);
2367} 2301}
2368 2302
@@ -2413,7 +2347,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2413 * 1M CCK rates */ 2347 * 1M CCK rates */
2414 2348
2415 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 2349 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2416 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 2350 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2417 2351
2418 index = IWL_FIRST_CCK_RATE; 2352 index = IWL_FIRST_CCK_RATE;
2419 for (i = IWL_RATE_6M_INDEX_TABLE; 2353 for (i = IWL_RATE_6M_INDEX_TABLE;
@@ -2434,14 +2368,14 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2434 2368
2435 /* Update the rate scaling for control frame Tx */ 2369 /* Update the rate scaling for control frame Tx */
2436 rate_cmd.table_id = 0; 2370 rate_cmd.table_id = 0;
2437 rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), 2371 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2438 &rate_cmd); 2372 &rate_cmd);
2439 if (rc) 2373 if (rc)
2440 return rc; 2374 return rc;
2441 2375
2442 /* Update the rate scaling for data frame Tx */ 2376 /* Update the rate scaling for data frame Tx */
2443 rate_cmd.table_id = 1; 2377 rate_cmd.table_id = 1;
2444 return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), 2378 return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2445 &rate_cmd); 2379 &rate_cmd);
2446} 2380}
2447 2381
@@ -2541,11 +2475,11 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
2541 IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); 2475 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
2542 2476
2543 /* verify BSM SRAM contents */ 2477 /* verify BSM SRAM contents */
2544 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); 2478 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
2545 for (reg = BSM_SRAM_LOWER_BOUND; 2479 for (reg = BSM_SRAM_LOWER_BOUND;
2546 reg < BSM_SRAM_LOWER_BOUND + len; 2480 reg < BSM_SRAM_LOWER_BOUND + len;
2547 reg += sizeof(u32), image++) { 2481 reg += sizeof(u32), image++) {
2548 val = iwl_read_prph(priv, reg); 2482 val = iwl_legacy_read_prph(priv, reg);
2549 if (val != le32_to_cpu(*image)) { 2483 if (val != le32_to_cpu(*image)) {
2550 IWL_ERR(priv, "BSM uCode verification failed at " 2484 IWL_ERR(priv, "BSM uCode verification failed at "
2551 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", 2485 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -2578,7 +2512,7 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
2578 */ 2512 */
2579static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv) 2513static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
2580{ 2514{
2581 _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); 2515 _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
2582 return 0; 2516 return 0;
2583} 2517}
2584 2518
@@ -2649,16 +2583,16 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2649 inst_len = priv->ucode_init.len; 2583 inst_len = priv->ucode_init.len;
2650 data_len = priv->ucode_init_data.len; 2584 data_len = priv->ucode_init_data.len;
2651 2585
2652 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 2586 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2653 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 2587 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2654 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); 2588 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2655 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); 2589 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2656 2590
2657 /* Fill BSM memory with bootstrap instructions */ 2591 /* Fill BSM memory with bootstrap instructions */
2658 for (reg_offset = BSM_SRAM_LOWER_BOUND; 2592 for (reg_offset = BSM_SRAM_LOWER_BOUND;
2659 reg_offset < BSM_SRAM_LOWER_BOUND + len; 2593 reg_offset < BSM_SRAM_LOWER_BOUND + len;
2660 reg_offset += sizeof(u32), image++) 2594 reg_offset += sizeof(u32), image++)
2661 _iwl_write_prph(priv, reg_offset, 2595 _iwl_legacy_write_prph(priv, reg_offset,
2662 le32_to_cpu(*image)); 2596 le32_to_cpu(*image));
2663 2597
2664 rc = iwl3945_verify_bsm(priv); 2598 rc = iwl3945_verify_bsm(priv);
@@ -2666,19 +2600,19 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2666 return rc; 2600 return rc;
2667 2601
2668 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ 2602 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2669 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); 2603 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
2670 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, 2604 iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
2671 IWL39_RTC_INST_LOWER_BOUND); 2605 IWL39_RTC_INST_LOWER_BOUND);
2672 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); 2606 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2673 2607
2674 /* Load bootstrap code into instruction SRAM now, 2608 /* Load bootstrap code into instruction SRAM now,
2675 * to prepare to load "initialize" uCode */ 2609 * to prepare to load "initialize" uCode */
2676 iwl_write_prph(priv, BSM_WR_CTRL_REG, 2610 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2677 BSM_WR_CTRL_REG_BIT_START); 2611 BSM_WR_CTRL_REG_BIT_START);
2678 2612
2679 /* Wait for load of bootstrap uCode to finish */ 2613 /* Wait for load of bootstrap uCode to finish */
2680 for (i = 0; i < 100; i++) { 2614 for (i = 0; i < 100; i++) {
2681 done = iwl_read_prph(priv, BSM_WR_CTRL_REG); 2615 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
2682 if (!(done & BSM_WR_CTRL_REG_BIT_START)) 2616 if (!(done & BSM_WR_CTRL_REG_BIT_START))
2683 break; 2617 break;
2684 udelay(10); 2618 udelay(10);
@@ -2692,7 +2626,7 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2692 2626
2693 /* Enable future boot loads whenever power management unit triggers it 2627 /* Enable future boot loads whenever power management unit triggers it
2694 * (e.g. when powering back up after power-save shutdown) */ 2628 * (e.g. when powering back up after power-save shutdown) */
2695 iwl_write_prph(priv, BSM_WR_CTRL_REG, 2629 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2696 BSM_WR_CTRL_REG_BIT_START_EN); 2630 BSM_WR_CTRL_REG_BIT_START_EN);
2697 2631
2698 return 0; 2632 return 0;
@@ -2701,7 +2635,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2701static struct iwl_hcmd_ops iwl3945_hcmd = { 2635static struct iwl_hcmd_ops iwl3945_hcmd = {
2702 .rxon_assoc = iwl3945_send_rxon_assoc, 2636 .rxon_assoc = iwl3945_send_rxon_assoc,
2703 .commit_rxon = iwl3945_commit_rxon, 2637 .commit_rxon = iwl3945_commit_rxon,
2704 .send_bt_config = iwl_send_bt_config,
2705}; 2638};
2706 2639
2707static struct iwl_lib_ops iwl3945_lib = { 2640static struct iwl_lib_ops iwl3945_lib = {
@@ -2727,13 +2660,9 @@ static struct iwl_lib_ops iwl3945_lib = {
2727 }, 2660 },
2728 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, 2661 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
2729 .release_semaphore = iwl3945_eeprom_release_semaphore, 2662 .release_semaphore = iwl3945_eeprom_release_semaphore,
2730 .query_addr = iwlcore_eeprom_query_addr,
2731 }, 2663 },
2732 .send_tx_power = iwl3945_send_tx_power, 2664 .send_tx_power = iwl3945_send_tx_power,
2733 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, 2665 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
2734 .isr_ops = {
2735 .isr = iwl_isr_legacy,
2736 },
2737 2666
2738 .debugfs_ops = { 2667 .debugfs_ops = {
2739 .rx_stats_read = iwl3945_ucode_rx_stats_read, 2668 .rx_stats_read = iwl3945_ucode_rx_stats_read,
@@ -2751,7 +2680,6 @@ static const struct iwl_legacy_ops iwl3945_legacy_ops = {
2751static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2680static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2752 .get_hcmd_size = iwl3945_get_hcmd_size, 2681 .get_hcmd_size = iwl3945_get_hcmd_size,
2753 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2682 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2754 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2755 .request_scan = iwl3945_request_scan, 2683 .request_scan = iwl3945_request_scan,
2756 .post_scan = iwl3945_post_scan, 2684 .post_scan = iwl3945_post_scan,
2757}; 2685};
@@ -2771,13 +2699,10 @@ static struct iwl_base_params iwl3945_base_params = {
2771 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, 2699 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2772 .set_l0s = false, 2700 .set_l0s = false,
2773 .use_bsm = true, 2701 .use_bsm = true,
2774 .use_isr_legacy = true,
2775 .led_compensation = 64, 2702 .led_compensation = 64,
2776 .broken_powersave = true,
2777 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 2703 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2778 .wd_timeout = IWL_DEF_WD_TIMEOUT, 2704 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2779 .max_event_log_size = 512, 2705 .max_event_log_size = 512,
2780 .tx_power_by_driver = true,
2781}; 2706};
2782 2707
2783static struct iwl_cfg iwl3945_bg_cfg = { 2708static struct iwl_cfg iwl3945_bg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
index 3eef1eb74a78..b118b59b71de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -108,7 +108,7 @@ struct iwl3945_rs_sta {
108 108
109/* 109/*
110 * The common struct MUST be first because it is shared between 110 * The common struct MUST be first because it is shared between
111 * 3945 and agn! 111 * 3945 and 4965!
112 */ 112 */
113struct iwl3945_sta_priv { 113struct iwl3945_sta_priv {
114 struct iwl_station_priv_common common; 114 struct iwl_station_priv_common common;
@@ -201,7 +201,7 @@ struct iwl3945_ibss_seq {
201 201
202/****************************************************************************** 202/******************************************************************************
203 * 203 *
204 * Functions implemented in iwl-base.c which are forward declared here 204 * Functions implemented in iwl3945-base.c which are forward declared here
205 * for use by iwl-*.c 205 * for use by iwl-*.c
206 * 206 *
207 *****************************************************************************/ 207 *****************************************************************************/
@@ -209,7 +209,7 @@ extern int iwl3945_calc_db_from_ratio(int sig_ratio);
209extern void iwl3945_rx_replenish(void *data); 209extern void iwl3945_rx_replenish(void *data);
210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
212 struct ieee80211_hdr *hdr,int left); 212 struct ieee80211_hdr *hdr, int left);
213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
214 char **buf, bool display); 214 char **buf, bool display);
215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); 215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
@@ -217,7 +217,7 @@ extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
217/****************************************************************************** 217/******************************************************************************
218 * 218 *
219 * Functions implemented in iwl-[34]*.c which are forward declared here 219 * Functions implemented in iwl-[34]*.c which are forward declared here
220 * for use by iwl-base.c 220 * for use by iwl3945-base.c
221 * 221 *
222 * NOTE: The implementation of these functions are hardware specific 222 * NOTE: The implementation of these functions are hardware specific
223 * which is why they are in the hardware specific files (vs. iwl-base.c) 223 * which is why they are in the hardware specific files (vs. iwl-base.c)
@@ -283,7 +283,7 @@ extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
283extern struct ieee80211_ops iwl3945_hw_ops; 283extern struct ieee80211_ops iwl3945_hw_ops;
284 284
285/* 285/*
286 * Forward declare iwl-3945.c functions for iwl-base.c 286 * Forward declare iwl-3945.c functions for iwl3945-base.c
287 */ 287 */
288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); 288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); 289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
new file mode 100644
index 000000000000..81d6a25eb04f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
@@ -0,0 +1,967 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/slab.h>
64#include <net/mac80211.h>
65
66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-4965-calib.h"
69
70/*****************************************************************************
71 * INIT calibrations framework
72 *****************************************************************************/
73
74struct statistics_general_data {
75 u32 beacon_silence_rssi_a;
76 u32 beacon_silence_rssi_b;
77 u32 beacon_silence_rssi_c;
78 u32 beacon_energy_a;
79 u32 beacon_energy_b;
80 u32 beacon_energy_c;
81};
82
83void iwl4965_calib_free_results(struct iwl_priv *priv)
84{
85 int i;
86
87 for (i = 0; i < IWL_CALIB_MAX; i++) {
88 kfree(priv->calib_results[i].buf);
89 priv->calib_results[i].buf = NULL;
90 priv->calib_results[i].buf_len = 0;
91 }
92}
93
94/*****************************************************************************
95 * RUNTIME calibrations framework
96 *****************************************************************************/
97
98/* "false alarms" are signals that our DSP tries to lock onto,
99 * but then determines that they are either noise, or transmissions
100 * from a distant wireless network (also "noise", really) that get
101 * "stepped on" by stronger transmissions within our own network.
102 * This algorithm attempts to set a sensitivity level that is high
103 * enough to receive all of our own network traffic, but not so
104 * high that our DSP gets too busy trying to lock onto non-network
105 * activity/noise. */
106static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
107 u32 norm_fa,
108 u32 rx_enable_time,
109 struct statistics_general_data *rx_info)
110{
111 u32 max_nrg_cck = 0;
112 int i = 0;
113 u8 max_silence_rssi = 0;
114 u32 silence_ref = 0;
115 u8 silence_rssi_a = 0;
116 u8 silence_rssi_b = 0;
117 u8 silence_rssi_c = 0;
118 u32 val;
119
120 /* "false_alarms" values below are cross-multiplications to assess the
121 * numbers of false alarms within the measured period of actual Rx
122 * (Rx is off when we're txing), vs the min/max expected false alarms
123 * (some should be expected if rx is sensitive enough) in a
124 * hypothetical listening period of 200 time units (TU), 204.8 msec:
125 *
126 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
127 *
128 * */
129 u32 false_alarms = norm_fa * 200 * 1024;
130 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
131 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
132 struct iwl_sensitivity_data *data = NULL;
133 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
134
135 data = &(priv->sensitivity_data);
136
137 data->nrg_auto_corr_silence_diff = 0;
138
139 /* Find max silence rssi among all 3 receivers.
140 * This is background noise, which may include transmissions from other
141 * networks, measured during silence before our network's beacon */
142 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
143 ALL_BAND_FILTER) >> 8);
144 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
145 ALL_BAND_FILTER) >> 8);
146 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
147 ALL_BAND_FILTER) >> 8);
148
149 val = max(silence_rssi_b, silence_rssi_c);
150 max_silence_rssi = max(silence_rssi_a, (u8) val);
151
152 /* Store silence rssi in 20-beacon history table */
153 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
154 data->nrg_silence_idx++;
155 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
156 data->nrg_silence_idx = 0;
157
158 /* Find max silence rssi across 20 beacon history */
159 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
160 val = data->nrg_silence_rssi[i];
161 silence_ref = max(silence_ref, val);
162 }
163 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
164 silence_rssi_a, silence_rssi_b, silence_rssi_c,
165 silence_ref);
166
167 /* Find max rx energy (min value!) among all 3 receivers,
168 * measured during beacon frame.
169 * Save it in 10-beacon history table. */
170 i = data->nrg_energy_idx;
171 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
172 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
173
174 data->nrg_energy_idx++;
175 if (data->nrg_energy_idx >= 10)
176 data->nrg_energy_idx = 0;
177
178 /* Find min rx energy (max value) across 10 beacon history.
179 * This is the minimum signal level that we want to receive well.
180 * Add backoff (margin so we don't miss slightly lower energy frames).
181 * This establishes an upper bound (min value) for energy threshold. */
182 max_nrg_cck = data->nrg_value[0];
183 for (i = 1; i < 10; i++)
184 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
185 max_nrg_cck += 6;
186
187 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
188 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
189 rx_info->beacon_energy_c, max_nrg_cck - 6);
190
191 /* Count number of consecutive beacons with fewer-than-desired
192 * false alarms. */
193 if (false_alarms < min_false_alarms)
194 data->num_in_cck_no_fa++;
195 else
196 data->num_in_cck_no_fa = 0;
197 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
198 data->num_in_cck_no_fa);
199
200 /* If we got too many false alarms this time, reduce sensitivity */
201 if ((false_alarms > max_false_alarms) &&
202 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
203 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
204 false_alarms, max_false_alarms);
205 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
206 data->nrg_curr_state = IWL_FA_TOO_MANY;
207 /* Store for "fewer than desired" on later beacon */
208 data->nrg_silence_ref = silence_ref;
209
210 /* increase energy threshold (reduce nrg value)
211 * to decrease sensitivity */
212 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
213 /* Else if we got fewer than desired, increase sensitivity */
214 } else if (false_alarms < min_false_alarms) {
215 data->nrg_curr_state = IWL_FA_TOO_FEW;
216
217 /* Compare silence level with silence level for most recent
218 * healthy number or too many false alarms */
219 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
220 (s32)silence_ref;
221
222 IWL_DEBUG_CALIB(priv,
223 "norm FA %u < min FA %u, silence diff %d\n",
224 false_alarms, min_false_alarms,
225 data->nrg_auto_corr_silence_diff);
226
227 /* Increase value to increase sensitivity, but only if:
228 * 1a) previous beacon did *not* have *too many* false alarms
229 * 1b) AND there's a significant difference in Rx levels
230 * from a previous beacon with too many, or healthy # FAs
231 * OR 2) We've seen a lot of beacons (100) with too few
232 * false alarms */
233 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
234 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
235 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
236
237 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
238 /* Increase nrg value to increase sensitivity */
239 val = data->nrg_th_cck + NRG_STEP_CCK;
240 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
241 } else {
242 IWL_DEBUG_CALIB(priv,
243 "... but not changing sensitivity\n");
244 }
245
246 /* Else we got a healthy number of false alarms, keep status quo */
247 } else {
248 IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
249 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
250
251 /* Store for use in "fewer than desired" with later beacon */
252 data->nrg_silence_ref = silence_ref;
253
254 /* If previous beacon had too many false alarms,
255 * give it some extra margin by reducing sensitivity again
256 * (but don't go below measured energy of desired Rx) */
257 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
258 IWL_DEBUG_CALIB(priv, "... increasing margin\n");
259 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
260 data->nrg_th_cck -= NRG_MARGIN;
261 else
262 data->nrg_th_cck = max_nrg_cck;
263 }
264 }
265
266 /* Make sure the energy threshold does not go above the measured
267 * energy of the desired Rx signals (reduced by backoff margin),
268 * or else we might start missing Rx frames.
269 * Lower value is higher energy, so we use max()!
270 */
271 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
272 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
273
274 data->nrg_prev_state = data->nrg_curr_state;
275
276 /* Auto-correlation CCK algorithm */
277 if (false_alarms > min_false_alarms) {
278
279 /* increase auto_corr values to decrease sensitivity
280 * so the DSP won't be disturbed by the noise
281 */
282 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
283 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
284 else {
285 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
286 data->auto_corr_cck =
287 min((u32)ranges->auto_corr_max_cck, val);
288 }
289 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
290 data->auto_corr_cck_mrc =
291 min((u32)ranges->auto_corr_max_cck_mrc, val);
292 } else if ((false_alarms < min_false_alarms) &&
293 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
294 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
295
296 /* Decrease auto_corr values to increase sensitivity */
297 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
298 data->auto_corr_cck =
299 max((u32)ranges->auto_corr_min_cck, val);
300 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
301 data->auto_corr_cck_mrc =
302 max((u32)ranges->auto_corr_min_cck_mrc, val);
303 }
304
305 return 0;
306}
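The cross-multiplication described in the comment block near the top of this function scales the false alarms seen during the actual receive time up to a hypothetical 200 TU (204.8 ms) listening window, so a single count can be tested against per-time lower and upper bounds. A worked example with assumed bounds (MIN_FA_CCK/MAX_FA_CCK live in the calibration header, not in this hunk; 5 and 50 are illustrative):

/* assumed: MIN_FA_CCK == 5, MAX_FA_CCK == 50 -- illustrative values only */
norm_fa          = 10;			/* false alarms + bad PLCP this period */
rx_enable_time   = 100000;		/* usec the receiver was actually on   */
false_alarms     = 10 * 200 * 1024;	/* = 2,048,000, scaled to 200 TU       */
min_false_alarms = 5  * 100000;		/* =   500,000                         */
max_false_alarms = 50 * 100000;		/* = 5,000,000                         */
/* 500,000 < 2,048,000 < 5,000,000: this lands in the "healthy number of
 * false alarms" branch above, so neither nrg_th_cck nor auto_corr_cck moves. */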
307
308
309static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
310 u32 norm_fa,
311 u32 rx_enable_time)
312{
313 u32 val;
314 u32 false_alarms = norm_fa * 200 * 1024;
315 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
316 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
317 struct iwl_sensitivity_data *data = NULL;
318 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
319
320 data = &(priv->sensitivity_data);
321
322 /* If we got too many false alarms this time, reduce sensitivity */
323 if (false_alarms > max_false_alarms) {
324
325 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
326 false_alarms, max_false_alarms);
327
328 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
329 data->auto_corr_ofdm =
330 min((u32)ranges->auto_corr_max_ofdm, val);
331
332 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
333 data->auto_corr_ofdm_mrc =
334 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
335
336 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
337 data->auto_corr_ofdm_x1 =
338 min((u32)ranges->auto_corr_max_ofdm_x1, val);
339
340 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
341 data->auto_corr_ofdm_mrc_x1 =
342 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
343 }
344
345 /* Else if we got fewer than desired, increase sensitivity */
346 else if (false_alarms < min_false_alarms) {
347
348 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
349 false_alarms, min_false_alarms);
350
351 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
352 data->auto_corr_ofdm =
353 max((u32)ranges->auto_corr_min_ofdm, val);
354
355 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
356 data->auto_corr_ofdm_mrc =
357 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
358
359 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
360 data->auto_corr_ofdm_x1 =
361 max((u32)ranges->auto_corr_min_ofdm_x1, val);
362
363 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
364 data->auto_corr_ofdm_mrc_x1 =
365 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
366 } else {
367 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
368 min_false_alarms, false_alarms, max_false_alarms);
369 }
370 return 0;
371}
372
373static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
374 struct iwl_sensitivity_data *data,
375 __le16 *tbl)
376{
377 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
378 cpu_to_le16((u16)data->auto_corr_ofdm);
379 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
380 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
381 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
382 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
383 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
384 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
385
386 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
387 cpu_to_le16((u16)data->auto_corr_cck);
388 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
389 cpu_to_le16((u16)data->auto_corr_cck_mrc);
390
391 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
392 cpu_to_le16((u16)data->nrg_th_cck);
393 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
394 cpu_to_le16((u16)data->nrg_th_ofdm);
395
396 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
397 cpu_to_le16(data->barker_corr_th_min);
398 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
399 cpu_to_le16(data->barker_corr_th_min_mrc);
400 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
401 cpu_to_le16(data->nrg_th_cca);
402
403 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
404 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
405 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
406 data->nrg_th_ofdm);
407
408 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
409 data->auto_corr_cck, data->auto_corr_cck_mrc,
410 data->nrg_th_cck);
411}
412
413/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
414static int iwl4965_sensitivity_write(struct iwl_priv *priv)
415{
416 struct iwl_sensitivity_cmd cmd;
417 struct iwl_sensitivity_data *data = NULL;
418 struct iwl_host_cmd cmd_out = {
419 .id = SENSITIVITY_CMD,
420 .len = sizeof(struct iwl_sensitivity_cmd),
421 .flags = CMD_ASYNC,
422 .data = &cmd,
423 };
424
425 data = &(priv->sensitivity_data);
426
427 memset(&cmd, 0, sizeof(cmd));
428
429 iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
430
431 /* Update uCode's "work" table, and copy it to DSP */
432 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
433
434 /* Don't send command to uCode if nothing has changed */
435 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
436 sizeof(u16)*HD_TABLE_SIZE)) {
437 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
438 return 0;
439 }
440
441 /* Copy table for comparison next time */
442 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
443 sizeof(u16)*HD_TABLE_SIZE);
444
445 return iwl_legacy_send_cmd(priv, &cmd_out);
446}
447
448void iwl4965_init_sensitivity(struct iwl_priv *priv)
449{
450 int ret = 0;
451 int i;
452 struct iwl_sensitivity_data *data = NULL;
453 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
454
455 if (priv->disable_sens_cal)
456 return;
457
458 IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
459
460 /* Clear driver's sensitivity algo data */
461 data = &(priv->sensitivity_data);
462
463 if (ranges == NULL)
464 return;
465
466 memset(data, 0, sizeof(struct iwl_sensitivity_data));
467
468 data->num_in_cck_no_fa = 0;
469 data->nrg_curr_state = IWL_FA_TOO_MANY;
470 data->nrg_prev_state = IWL_FA_TOO_MANY;
471 data->nrg_silence_ref = 0;
472 data->nrg_silence_idx = 0;
473 data->nrg_energy_idx = 0;
474
475 for (i = 0; i < 10; i++)
476 data->nrg_value[i] = 0;
477
478 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
479 data->nrg_silence_rssi[i] = 0;
480
481 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
482 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
483 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
484 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
485 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
486 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
487 data->nrg_th_cck = ranges->nrg_th_cck;
488 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
489 data->barker_corr_th_min = ranges->barker_corr_th_min;
490 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
491 data->nrg_th_cca = ranges->nrg_th_cca;
492
493 data->last_bad_plcp_cnt_ofdm = 0;
494 data->last_fa_cnt_ofdm = 0;
495 data->last_bad_plcp_cnt_cck = 0;
496 data->last_fa_cnt_cck = 0;
497
498 ret |= iwl4965_sensitivity_write(priv);
499 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
500}
501
502void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
503{
504 u32 rx_enable_time;
505 u32 fa_cck;
506 u32 fa_ofdm;
507 u32 bad_plcp_cck;
508 u32 bad_plcp_ofdm;
509 u32 norm_fa_ofdm;
510 u32 norm_fa_cck;
511 struct iwl_sensitivity_data *data = NULL;
512 struct statistics_rx_non_phy *rx_info;
513 struct statistics_rx_phy *ofdm, *cck;
514 unsigned long flags;
515 struct statistics_general_data statis;
516
517 if (priv->disable_sens_cal)
518 return;
519
520 data = &(priv->sensitivity_data);
521
522 if (!iwl_legacy_is_any_associated(priv)) {
523 IWL_DEBUG_CALIB(priv, "<< - not associated\n");
524 return;
525 }
526
527 spin_lock_irqsave(&priv->lock, flags);
528
529 rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
530 ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
531 cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
532
533 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
534 IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
535 spin_unlock_irqrestore(&priv->lock, flags);
536 return;
537 }
538
539 /* Extract Statistics: */
540 rx_enable_time = le32_to_cpu(rx_info->channel_load);
541 fa_cck = le32_to_cpu(cck->false_alarm_cnt);
542 fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
543 bad_plcp_cck = le32_to_cpu(cck->plcp_err);
544 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
545
546 statis.beacon_silence_rssi_a =
547 le32_to_cpu(rx_info->beacon_silence_rssi_a);
548 statis.beacon_silence_rssi_b =
549 le32_to_cpu(rx_info->beacon_silence_rssi_b);
550 statis.beacon_silence_rssi_c =
551 le32_to_cpu(rx_info->beacon_silence_rssi_c);
552 statis.beacon_energy_a =
553 le32_to_cpu(rx_info->beacon_energy_a);
554 statis.beacon_energy_b =
555 le32_to_cpu(rx_info->beacon_energy_b);
556 statis.beacon_energy_c =
557 le32_to_cpu(rx_info->beacon_energy_c);
558
559 spin_unlock_irqrestore(&priv->lock, flags);
560
561 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
562
563 if (!rx_enable_time) {
564 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
565 return;
566 }
567
568 /* These statistics increase monotonically, and do not reset
569 * at each beacon. Calculate difference from last value, or just
570 * use the new statistics value if it has reset or wrapped around. */
571 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
572 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
573 else {
574 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
575 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
576 }
577
578 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
579 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
580 else {
581 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
582 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
583 }
584
585 if (data->last_fa_cnt_ofdm > fa_ofdm)
586 data->last_fa_cnt_ofdm = fa_ofdm;
587 else {
588 fa_ofdm -= data->last_fa_cnt_ofdm;
589 data->last_fa_cnt_ofdm += fa_ofdm;
590 }
591
592 if (data->last_fa_cnt_cck > fa_cck)
593 data->last_fa_cnt_cck = fa_cck;
594 else {
595 fa_cck -= data->last_fa_cnt_cck;
596 data->last_fa_cnt_cck += fa_cck;
597 }
598
599 /* Total aborted signal locks */
600 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
601 norm_fa_cck = fa_cck + bad_plcp_cck;
602
603 IWL_DEBUG_CALIB(priv,
604 "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
605 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
606
607 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
608 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
609
610 iwl4965_sensitivity_write(priv);
611}
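Each of the four counter updates above follows the same pattern: the uCode statistics only ever grow, so the driver remembers the last absolute value, uses the difference as this period's count, and treats a smaller reading as a reset or wraparound. A minimal sketch of that pattern as a helper (the helper is ours, for illustration; the driver open-codes it per counter):

/* hypothetical helper -- mirrors the delta-or-reset logic open-coded above */
static u32 example_delta_since_last(u32 *last, u32 now)
{
	u32 delta;

	if (*last > now) {	/* counter reset or wrapped: use the new value as-is */
		*last = now;
		return now;
	}

	delta = now - *last;
	*last = now;		/* same effect as "*last += delta" above */
	return delta;
}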
612
613static inline u8 iwl4965_find_first_chain(u8 mask)
614{
615 if (mask & ANT_A)
616 return CHAIN_A;
617 if (mask & ANT_B)
618 return CHAIN_B;
619 return CHAIN_C;
620}
621
622/**
623 * Run disconnected antenna algorithm to find out which antennas are
624 * disconnected.
625 */
626static void
627iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
628 struct iwl_chain_noise_data *data)
629{
630 u32 active_chains = 0;
631 u32 max_average_sig;
632 u16 max_average_sig_antenna_i;
633 u8 num_tx_chains;
634 u8 first_chain;
635 u16 i = 0;
636
637 average_sig[0] = data->chain_signal_a /
638 priv->cfg->base_params->chain_noise_num_beacons;
639 average_sig[1] = data->chain_signal_b /
640 priv->cfg->base_params->chain_noise_num_beacons;
641 average_sig[2] = data->chain_signal_c /
642 priv->cfg->base_params->chain_noise_num_beacons;
643
644 if (average_sig[0] >= average_sig[1]) {
645 max_average_sig = average_sig[0];
646 max_average_sig_antenna_i = 0;
647 active_chains = (1 << max_average_sig_antenna_i);
648 } else {
649 max_average_sig = average_sig[1];
650 max_average_sig_antenna_i = 1;
651 active_chains = (1 << max_average_sig_antenna_i);
652 }
653
654 if (average_sig[2] >= max_average_sig) {
655 max_average_sig = average_sig[2];
656 max_average_sig_antenna_i = 2;
657 active_chains = (1 << max_average_sig_antenna_i);
658 }
659
660 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
661 average_sig[0], average_sig[1], average_sig[2]);
662 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
663 max_average_sig, max_average_sig_antenna_i);
664
665 /* Compare signal strengths for all 3 receivers. */
666 for (i = 0; i < NUM_RX_CHAINS; i++) {
667 if (i != max_average_sig_antenna_i) {
668 s32 rssi_delta = (max_average_sig - average_sig[i]);
669
670 /* If signal is very weak, compared with
671 * strongest, mark it as disconnected. */
672 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
673 data->disconn_array[i] = 1;
674 else
675 active_chains |= (1 << i);
676 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
677 "disconn_array[i] = %d\n",
678 i, rssi_delta, data->disconn_array[i]);
679 }
680 }
681
682 /*
683 * The above algorithm sometimes fails when the ucode
684 * reports 0 for all chains. It's not clear why that
685 * happens to start with, but it is then causing trouble
686 * because this can make us enable more chains than the
687 * hardware really has.
688 *
689 * To be safe, simply mask out any chains that we know
690 * are not on the device.
691 */
692 active_chains &= priv->hw_params.valid_rx_ant;
693
694 num_tx_chains = 0;
695 for (i = 0; i < NUM_RX_CHAINS; i++) {
696 /* loops on all the bits of
697 * priv->hw_setting.valid_tx_ant */
698 u8 ant_msk = (1 << i);
699 if (!(priv->hw_params.valid_tx_ant & ant_msk))
700 continue;
701
702 num_tx_chains++;
703 if (data->disconn_array[i] == 0)
704 /* there is a Tx antenna connected */
705 break;
706 if (num_tx_chains == priv->hw_params.tx_chains_num &&
707 data->disconn_array[i]) {
708 /*
709 * If all chains are disconnected
710 * connect the first valid tx chain
711 */
712 first_chain =
713 iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
714 data->disconn_array[first_chain] = 0;
715 active_chains |= BIT(first_chain);
716 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \
717 W/A - declare %d as connected\n",
718 first_chain);
719 break;
720 }
721 }
722
723 if (active_chains != priv->hw_params.valid_rx_ant &&
724 active_chains != priv->chain_noise_data.active_chains)
725 IWL_DEBUG_CALIB(priv,
726 "Detected that not all antennas are connected! "
727 "Connected: %#x, valid: %#x.\n",
728 active_chains, priv->hw_params.valid_rx_ant);
729
730 /* Save for use within RXON, TX, SCAN commands, etc. */
731 data->active_chains = active_chains;
732 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
733 active_chains);
734}
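In concrete terms, the loop above declares a chain disconnected when its averaged beacon signal falls more than MAXIMUM_ALLOWED_PATHLOSS below the strongest chain's average. With assumed numbers (the constant's value, taken here as 15, is defined in the calibration header and is not shown in this hunk):

/* assumed: MAXIMUM_ALLOWED_PATHLOSS == 15 -- illustrative value */
average_sig[0] = 210;	/* strongest chain, active_chains starts as BIT(0) */
average_sig[1] = 200;	/* delta 10 <= 15 -> stays connected, BIT(1) added */
average_sig[2] = 190;	/* delta 20 >  15 -> disconn_array[2] = 1          */
/* active_chains ends up as 0x3, is then masked with valid_rx_ant, and if
 * every valid TX chain turned out disconnected the first one is forced back on. */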
735
736static void iwl4965_gain_computation(struct iwl_priv *priv,
737 u32 *average_noise,
738 u16 min_average_noise_antenna_i,
739 u32 min_average_noise,
740 u8 default_chain)
741{
742 int i, ret;
743 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
744
745 data->delta_gain_code[min_average_noise_antenna_i] = 0;
746
747 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
748 s32 delta_g = 0;
749
750 if (!(data->disconn_array[i]) &&
751 (data->delta_gain_code[i] ==
752 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
753 delta_g = average_noise[i] - min_average_noise;
754 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
755 data->delta_gain_code[i] =
756 min(data->delta_gain_code[i],
757 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
758
759 data->delta_gain_code[i] =
760 (data->delta_gain_code[i] | (1 << 2));
761 } else {
762 data->delta_gain_code[i] = 0;
763 }
764 }
765 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
766 data->delta_gain_code[0],
767 data->delta_gain_code[1],
768 data->delta_gain_code[2]);
769
770 /* Differential gain gets sent to uCode only once */
771 if (!data->radio_write) {
772 struct iwl_calib_diff_gain_cmd cmd;
773 data->radio_write = 1;
774
775 memset(&cmd, 0, sizeof(cmd));
776 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
777 cmd.diff_gain_a = data->delta_gain_code[0];
778 cmd.diff_gain_b = data->delta_gain_code[1];
779 cmd.diff_gain_c = data->delta_gain_code[2];
780 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
781 sizeof(cmd), &cmd);
782 if (ret)
783 IWL_DEBUG_CALIB(priv, "fail sending cmd "
784 "REPLY_PHY_CALIBRATION_CMD\n");
785
786 /* TODO we might want recalculate
787 * rx_chain in rxon cmd */
788
789 /* Mark so we run this algo only once! */
790 data->state = IWL_CHAIN_NOISE_CALIBRATED;
791 }
792}
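The gain step above turns each connected chain's noise excess over the quietest chain into a delta-gain code: (delta * 10) / 15, i.e. one code step per 1.5 noise units, clamped to the maximum code, with bit 2 OR'd in before the codes are sent (once) via REPLY_PHY_CALIBRATION_CMD. Worked arithmetic, assuming CHAIN_NOISE_MAX_DELTA_GAIN_CODE is 15 (the value is defined elsewhere, not in this hunk):

/* chain barely noisier than the quietest chain */
delta_g = 3;
code    = (3 * 10) / 15;	/* = 2                    */
code    = min(code, 15);	/* still 2                */
code   |= (1 << 2);		/* = 6                    */

/* chain much noisier than the quietest chain */
delta_g = 30;
code    = (30 * 10) / 15;	/* = 20                   */
code    = min(code, 15);	/* clamped to 15          */
code   |= (1 << 2);		/* = 15, bit 2 already set */
/* the quietest chain itself keeps delta_gain_code == 0 */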
793
794
795
796/*
797 * Accumulate 16 beacons of signal and noise statistics for each of
798 * 3 receivers/antennas/rx-chains, then figure out:
799 * 1) Which antennas are connected.
800 * 2) Differential rx gain settings to balance the 3 receivers.
801 */
802void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
803{
804 struct iwl_chain_noise_data *data = NULL;
805
806 u32 chain_noise_a;
807 u32 chain_noise_b;
808 u32 chain_noise_c;
809 u32 chain_sig_a;
810 u32 chain_sig_b;
811 u32 chain_sig_c;
812 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
813 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
814 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
815 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
816 u16 i = 0;
817 u16 rxon_chnum = INITIALIZATION_VALUE;
818 u16 stat_chnum = INITIALIZATION_VALUE;
819 u8 rxon_band24;
820 u8 stat_band24;
821 unsigned long flags;
822 struct statistics_rx_non_phy *rx_info;
823
824 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
825
826 if (priv->disable_chain_noise_cal)
827 return;
828
829 data = &(priv->chain_noise_data);
830
831 /*
832 * Accumulate just the first "chain_noise_num_beacons" after
833 * the first association, then we're done forever.
834 */
835 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
836 if (data->state == IWL_CHAIN_NOISE_ALIVE)
837 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
838 return;
839 }
840
841 spin_lock_irqsave(&priv->lock, flags);
842
843 rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
844 rx.general);
845
846 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
847 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
848 spin_unlock_irqrestore(&priv->lock, flags);
849 return;
850 }
851
852 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
853 rxon_chnum = le16_to_cpu(ctx->staging.channel);
854
855 stat_band24 = !!(((struct iwl_notif_statistics *)
856 stat_resp)->flag &
857 STATISTICS_REPLY_FLG_BAND_24G_MSK);
858 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
859 stat_resp)->flag) >> 16;
860
861 /* Make sure we accumulate data for just the associated channel
862 * (even if scanning). */
863 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
864 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
865 rxon_chnum, rxon_band24);
866 spin_unlock_irqrestore(&priv->lock, flags);
867 return;
868 }
869
870 /*
871 * Accumulate beacon statistics values across
872 * "chain_noise_num_beacons"
873 */
874 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
875 IN_BAND_FILTER;
876 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
877 IN_BAND_FILTER;
878 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
879 IN_BAND_FILTER;
880
881 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
882 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
883 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
884
885 spin_unlock_irqrestore(&priv->lock, flags);
886
887 data->beacon_count++;
888
889 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
890 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
891 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
892
893 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
894 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
895 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
896
897 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
898 rxon_chnum, rxon_band24, data->beacon_count);
899 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
900 chain_sig_a, chain_sig_b, chain_sig_c);
901 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
902 chain_noise_a, chain_noise_b, chain_noise_c);
903
904 /* If this is the "chain_noise_num_beacons", determine:
905 * 1) Disconnected antennas (using signal strengths)
906 * 2) Differential gain (using silence noise) to balance receivers */
907 if (data->beacon_count !=
908 priv->cfg->base_params->chain_noise_num_beacons)
909 return;
910
911 /* Analyze signal for disconnected antenna */
912 iwl4965_find_disconn_antenna(priv, average_sig, data);
913
914 /* Analyze noise for rx balance */
915 average_noise[0] = data->chain_noise_a /
916 priv->cfg->base_params->chain_noise_num_beacons;
917 average_noise[1] = data->chain_noise_b /
918 priv->cfg->base_params->chain_noise_num_beacons;
919 average_noise[2] = data->chain_noise_c /
920 priv->cfg->base_params->chain_noise_num_beacons;
921
922 for (i = 0; i < NUM_RX_CHAINS; i++) {
923 if (!(data->disconn_array[i]) &&
924 (average_noise[i] <= min_average_noise)) {
925 /* This means that chain i is active and has
926 * lower noise values so far: */
927 min_average_noise = average_noise[i];
928 min_average_noise_antenna_i = i;
929 }
930 }
931
932 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
933 average_noise[0], average_noise[1],
934 average_noise[2]);
935
936 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
937 min_average_noise, min_average_noise_antenna_i);
938
939 iwl4965_gain_computation(priv, average_noise,
940 min_average_noise_antenna_i, min_average_noise,
941 iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
942
943 /* Some power changes may have been made during the calibration.
944 * Update and commit the RXON
945 */
946 if (priv->cfg->ops->lib->update_chain_flags)
947 priv->cfg->ops->lib->update_chain_flags(priv);
948
949 data->state = IWL_CHAIN_NOISE_DONE;
950 iwl_legacy_power_update_mode(priv, false);
951}
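
A worked example of the averaging step above, with invented numbers: if chain_noise_num_beacons is 16 and the accumulated silence values are a=800, b=640, c=720, the per-chain averages are 50, 40 and 45, so chain B becomes the reference (its delta gain code stays 0) and A and C are attenuated relative to it. A minimal sketch of that selection (helper name invented):

	/* Illustrative sketch: average the accumulated silence values and pick
	 * the quietest connected chain, as the loop above does. */
	static u16 sketch_pick_reference_chain(const u32 sum[3], u32 num_beacons,
					       const u8 disconn[3])
	{
		u32 min_noise = 0xffffffff;
		u16 min_i = 0, i;

		for (i = 0; i < 3; i++) {
			u32 avg = sum[i] / num_beacons;

			if (!disconn[i] && avg <= min_noise) {
				min_noise = avg;
				min_i = i;
			}
		}
		return min_i;
	}
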
952
953void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
954{
955 int i;
956 memset(&(priv->sensitivity_data), 0,
957 sizeof(struct iwl_sensitivity_data));
958 memset(&(priv->chain_noise_data), 0,
959 sizeof(struct iwl_chain_noise_data));
960 for (i = 0; i < NUM_RX_CHAINS; i++)
961 priv->chain_noise_data.delta_gain_code[i] =
962 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
963
964 /* Ask for statistics now, the uCode will send notification
965 * periodically after association */
966 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
967}
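
A hedged sketch of how these entry points fit together; the handler names below are hypothetical, and the real call sites live in the 4965 statistics/RX path:

	/* Hypothetical wiring, for illustration only. */
	static void sketch_post_assoc(struct iwl_priv *priv)
	{
		/* clears sensitivity and chain-noise state, requests stats */
		iwl4965_reset_run_time_calib(priv);
	}

	static void sketch_on_statistics_notif(struct iwl_priv *priv, void *stats)
	{
		iwl4965_sensitivity_calibration(priv, stats);
		iwl4965_chain_noise_calibration(priv, stats);
	}
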
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
index 9f7b2f935964..f46c80e6e005 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -59,21 +59,17 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
+#ifndef __iwl_4965_calib_h__
+#define __iwl_4965_calib_h__
 
-#ifndef __iwl_legacy_h__
-#define __iwl_legacy_h__
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-commands.h"
 
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
-			struct ieee80211_vif *vif,
-			struct ieee80211_bss_conf *bss_conf,
-			u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
-			struct ieee80211_tx_info *info,
-			__le16 fc, __le32 *tx_flags);
+void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
+void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
+void iwl4965_init_sensitivity(struct iwl_priv *priv);
+void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
+void iwl4965_calib_free_results(struct iwl_priv *priv);
 
-irqreturn_t iwl_isr_legacy(int irq, void *data);
-
-#endif /* __iwl_legacy_h__ */
+#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
new file mode 100644
index 000000000000..1c93665766e4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
@@ -0,0 +1,774 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "iwl-4965.h"
29#include "iwl-4965-debugfs.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
35
36static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
37{
38 int p = 0;
39 u32 flag;
40
41 flag = le32_to_cpu(priv->_4965.statistics.flag);
42
43 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
44 if (flag & UCODE_STATISTICS_CLEAR_MSK)
45 p += scnprintf(buf + p, bufsz - p,
46 "\tStatistics have been cleared\n");
47 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
48 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
49 ? "2.4 GHz" : "5.2 GHz");
50 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
51 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
52 ? "enabled" : "disabled");
53
54 return p;
55}
56
57ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
58 size_t count, loff_t *ppos)
59{
60 struct iwl_priv *priv = file->private_data;
61 int pos = 0;
62 char *buf;
63 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
64 sizeof(struct statistics_rx_non_phy) * 40 +
65 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
66 ssize_t ret;
67 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
68 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
69 struct statistics_rx_non_phy *general, *accum_general;
70 struct statistics_rx_non_phy *delta_general, *max_general;
71 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
72
73 if (!iwl_legacy_is_alive(priv))
74 return -EAGAIN;
75
76 buf = kzalloc(bufsz, GFP_KERNEL);
77 if (!buf) {
78 IWL_ERR(priv, "Can not allocate Buffer\n");
79 return -ENOMEM;
80 }
81
82 /*
 83	 * The statistics shown here are based on the last statistics
 84	 * notification from the uCode and might not reflect the current
 85	 * uCode activity.
86 */
87 ofdm = &priv->_4965.statistics.rx.ofdm;
88 cck = &priv->_4965.statistics.rx.cck;
89 general = &priv->_4965.statistics.rx.general;
90 ht = &priv->_4965.statistics.rx.ofdm_ht;
91 accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
92 accum_cck = &priv->_4965.accum_statistics.rx.cck;
93 accum_general = &priv->_4965.accum_statistics.rx.general;
94 accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
95 delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
96 delta_cck = &priv->_4965.delta_statistics.rx.cck;
97 delta_general = &priv->_4965.delta_statistics.rx.general;
98 delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
99 max_ofdm = &priv->_4965.max_delta.rx.ofdm;
100 max_cck = &priv->_4965.max_delta.rx.cck;
101 max_general = &priv->_4965.max_delta.rx.general;
102 max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
103
104 pos += iwl4965_statistics_flag(priv, buf, bufsz);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 fmt_header, "Statistics_Rx - OFDM:");
107 pos += scnprintf(buf + pos, bufsz - pos,
108 fmt_table, "ina_cnt:",
109 le32_to_cpu(ofdm->ina_cnt),
110 accum_ofdm->ina_cnt,
111 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 fmt_table, "fina_cnt:",
114 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
115 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
116 pos += scnprintf(buf + pos, bufsz - pos,
117 fmt_table, "plcp_err:",
118 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
119 delta_ofdm->plcp_err, max_ofdm->plcp_err);
120 pos += scnprintf(buf + pos, bufsz - pos,
121 fmt_table, "crc32_err:",
122 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
123 delta_ofdm->crc32_err, max_ofdm->crc32_err);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 fmt_table, "overrun_err:",
126 le32_to_cpu(ofdm->overrun_err),
127 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
128 max_ofdm->overrun_err);
129 pos += scnprintf(buf + pos, bufsz - pos,
130 fmt_table, "early_overrun_err:",
131 le32_to_cpu(ofdm->early_overrun_err),
132 accum_ofdm->early_overrun_err,
133 delta_ofdm->early_overrun_err,
134 max_ofdm->early_overrun_err);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 fmt_table, "crc32_good:",
137 le32_to_cpu(ofdm->crc32_good),
138 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
139 max_ofdm->crc32_good);
140 pos += scnprintf(buf + pos, bufsz - pos,
141 fmt_table, "false_alarm_cnt:",
142 le32_to_cpu(ofdm->false_alarm_cnt),
143 accum_ofdm->false_alarm_cnt,
144 delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos += scnprintf(buf + pos, bufsz - pos,
153 fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout),
155 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
156 max_ofdm->sfd_timeout);
157 pos += scnprintf(buf + pos, bufsz - pos,
158 fmt_table, "fina_timeout:",
159 le32_to_cpu(ofdm->fina_timeout),
160 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 fmt_table, "unresponded_rts:",
164 le32_to_cpu(ofdm->unresponded_rts),
165 accum_ofdm->unresponded_rts,
166 delta_ofdm->unresponded_rts,
167 max_ofdm->unresponded_rts);
168 pos += scnprintf(buf + pos, bufsz - pos,
169 fmt_table, "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos += scnprintf(buf + pos, bufsz - pos,
175 fmt_table, "sent_ack_cnt:",
176 le32_to_cpu(ofdm->sent_ack_cnt),
177 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
178 max_ofdm->sent_ack_cnt);
179 pos += scnprintf(buf + pos, bufsz - pos,
180 fmt_table, "sent_cts_cnt:",
181 le32_to_cpu(ofdm->sent_cts_cnt),
182 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
183 max_ofdm->sent_cts_cnt);
184 pos += scnprintf(buf + pos, bufsz - pos,
185 fmt_table, "sent_ba_rsp_cnt:",
186 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
187 accum_ofdm->sent_ba_rsp_cnt,
188 delta_ofdm->sent_ba_rsp_cnt,
189 max_ofdm->sent_ba_rsp_cnt);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 fmt_table, "dsp_self_kill:",
192 le32_to_cpu(ofdm->dsp_self_kill),
193 accum_ofdm->dsp_self_kill,
194 delta_ofdm->dsp_self_kill,
195 max_ofdm->dsp_self_kill);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 fmt_table, "mh_format_err:",
198 le32_to_cpu(ofdm->mh_format_err),
199 accum_ofdm->mh_format_err,
200 delta_ofdm->mh_format_err,
201 max_ofdm->mh_format_err);
202 pos += scnprintf(buf + pos, bufsz - pos,
203 fmt_table, "re_acq_main_rssi_sum:",
204 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
205 accum_ofdm->re_acq_main_rssi_sum,
206 delta_ofdm->re_acq_main_rssi_sum,
207 max_ofdm->re_acq_main_rssi_sum);
208
209 pos += scnprintf(buf + pos, bufsz - pos,
210 fmt_header, "Statistics_Rx - CCK:");
211 pos += scnprintf(buf + pos, bufsz - pos,
212 fmt_table, "ina_cnt:",
213 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
214 delta_cck->ina_cnt, max_cck->ina_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 fmt_table, "fina_cnt:",
217 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
218 delta_cck->fina_cnt, max_cck->fina_cnt);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 fmt_table, "plcp_err:",
221 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
222 delta_cck->plcp_err, max_cck->plcp_err);
223 pos += scnprintf(buf + pos, bufsz - pos,
224 fmt_table, "crc32_err:",
225 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
226 delta_cck->crc32_err, max_cck->crc32_err);
227 pos += scnprintf(buf + pos, bufsz - pos,
228 fmt_table, "overrun_err:",
229 le32_to_cpu(cck->overrun_err),
230 accum_cck->overrun_err, delta_cck->overrun_err,
231 max_cck->overrun_err);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 fmt_table, "early_overrun_err:",
234 le32_to_cpu(cck->early_overrun_err),
235 accum_cck->early_overrun_err,
236 delta_cck->early_overrun_err,
237 max_cck->early_overrun_err);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 fmt_table, "crc32_good:",
240 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
241 delta_cck->crc32_good, max_cck->crc32_good);
242 pos += scnprintf(buf + pos, bufsz - pos,
243 fmt_table, "false_alarm_cnt:",
244 le32_to_cpu(cck->false_alarm_cnt),
245 accum_cck->false_alarm_cnt,
246 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 fmt_table, "fina_sync_err_cnt:",
249 le32_to_cpu(cck->fina_sync_err_cnt),
250 accum_cck->fina_sync_err_cnt,
251 delta_cck->fina_sync_err_cnt,
252 max_cck->fina_sync_err_cnt);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 fmt_table, "sfd_timeout:",
255 le32_to_cpu(cck->sfd_timeout),
256 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
257 max_cck->sfd_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 fmt_table, "fina_timeout:",
260 le32_to_cpu(cck->fina_timeout),
261 accum_cck->fina_timeout, delta_cck->fina_timeout,
262 max_cck->fina_timeout);
263 pos += scnprintf(buf + pos, bufsz - pos,
264 fmt_table, "unresponded_rts:",
265 le32_to_cpu(cck->unresponded_rts),
266 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
267 max_cck->unresponded_rts);
268 pos += scnprintf(buf + pos, bufsz - pos,
269 fmt_table, "rxe_frame_lmt_ovrun:",
270 le32_to_cpu(cck->rxe_frame_limit_overrun),
271 accum_cck->rxe_frame_limit_overrun,
272 delta_cck->rxe_frame_limit_overrun,
273 max_cck->rxe_frame_limit_overrun);
274 pos += scnprintf(buf + pos, bufsz - pos,
275 fmt_table, "sent_ack_cnt:",
276 le32_to_cpu(cck->sent_ack_cnt),
277 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
278 max_cck->sent_ack_cnt);
279 pos += scnprintf(buf + pos, bufsz - pos,
280 fmt_table, "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
283 max_cck->sent_cts_cnt);
284 pos += scnprintf(buf + pos, bufsz - pos,
285 fmt_table, "sent_ba_rsp_cnt:",
286 le32_to_cpu(cck->sent_ba_rsp_cnt),
287 accum_cck->sent_ba_rsp_cnt,
288 delta_cck->sent_ba_rsp_cnt,
289 max_cck->sent_ba_rsp_cnt);
290 pos += scnprintf(buf + pos, bufsz - pos,
291 fmt_table, "dsp_self_kill:",
292 le32_to_cpu(cck->dsp_self_kill),
293 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
294 max_cck->dsp_self_kill);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 fmt_table, "mh_format_err:",
297 le32_to_cpu(cck->mh_format_err),
298 accum_cck->mh_format_err, delta_cck->mh_format_err,
299 max_cck->mh_format_err);
300 pos += scnprintf(buf + pos, bufsz - pos,
301 fmt_table, "re_acq_main_rssi_sum:",
302 le32_to_cpu(cck->re_acq_main_rssi_sum),
303 accum_cck->re_acq_main_rssi_sum,
304 delta_cck->re_acq_main_rssi_sum,
305 max_cck->re_acq_main_rssi_sum);
306
307 pos += scnprintf(buf + pos, bufsz - pos,
308 fmt_header, "Statistics_Rx - GENERAL:");
309 pos += scnprintf(buf + pos, bufsz - pos,
310 fmt_table, "bogus_cts:",
311 le32_to_cpu(general->bogus_cts),
312 accum_general->bogus_cts, delta_general->bogus_cts,
313 max_general->bogus_cts);
314 pos += scnprintf(buf + pos, bufsz - pos,
315 fmt_table, "bogus_ack:",
316 le32_to_cpu(general->bogus_ack),
317 accum_general->bogus_ack, delta_general->bogus_ack,
318 max_general->bogus_ack);
319 pos += scnprintf(buf + pos, bufsz - pos,
320 fmt_table, "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 fmt_table, "filtered_frames:",
327 le32_to_cpu(general->filtered_frames),
328 accum_general->filtered_frames,
329 delta_general->filtered_frames,
330 max_general->filtered_frames);
331 pos += scnprintf(buf + pos, bufsz - pos,
332 fmt_table, "non_channel_beacons:",
333 le32_to_cpu(general->non_channel_beacons),
334 accum_general->non_channel_beacons,
335 delta_general->non_channel_beacons,
336 max_general->non_channel_beacons);
337 pos += scnprintf(buf + pos, bufsz - pos,
338 fmt_table, "channel_beacons:",
339 le32_to_cpu(general->channel_beacons),
340 accum_general->channel_beacons,
341 delta_general->channel_beacons,
342 max_general->channel_beacons);
343 pos += scnprintf(buf + pos, bufsz - pos,
344 fmt_table, "num_missed_bcon:",
345 le32_to_cpu(general->num_missed_bcon),
346 accum_general->num_missed_bcon,
347 delta_general->num_missed_bcon,
348 max_general->num_missed_bcon);
349 pos += scnprintf(buf + pos, bufsz - pos,
350 fmt_table, "adc_rx_saturation_time:",
351 le32_to_cpu(general->adc_rx_saturation_time),
352 accum_general->adc_rx_saturation_time,
353 delta_general->adc_rx_saturation_time,
354 max_general->adc_rx_saturation_time);
355 pos += scnprintf(buf + pos, bufsz - pos,
356 fmt_table, "ina_detect_search_tm:",
357 le32_to_cpu(general->ina_detection_search_time),
358 accum_general->ina_detection_search_time,
359 delta_general->ina_detection_search_time,
360 max_general->ina_detection_search_time);
361 pos += scnprintf(buf + pos, bufsz - pos,
362 fmt_table, "beacon_silence_rssi_a:",
363 le32_to_cpu(general->beacon_silence_rssi_a),
364 accum_general->beacon_silence_rssi_a,
365 delta_general->beacon_silence_rssi_a,
366 max_general->beacon_silence_rssi_a);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 fmt_table, "beacon_silence_rssi_b:",
369 le32_to_cpu(general->beacon_silence_rssi_b),
370 accum_general->beacon_silence_rssi_b,
371 delta_general->beacon_silence_rssi_b,
372 max_general->beacon_silence_rssi_b);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 fmt_table, "beacon_silence_rssi_c:",
375 le32_to_cpu(general->beacon_silence_rssi_c),
376 accum_general->beacon_silence_rssi_c,
377 delta_general->beacon_silence_rssi_c,
378 max_general->beacon_silence_rssi_c);
379 pos += scnprintf(buf + pos, bufsz - pos,
380 fmt_table, "interference_data_flag:",
381 le32_to_cpu(general->interference_data_flag),
382 accum_general->interference_data_flag,
383 delta_general->interference_data_flag,
384 max_general->interference_data_flag);
385 pos += scnprintf(buf + pos, bufsz - pos,
386 fmt_table, "channel_load:",
387 le32_to_cpu(general->channel_load),
388 accum_general->channel_load,
389 delta_general->channel_load,
390 max_general->channel_load);
391 pos += scnprintf(buf + pos, bufsz - pos,
392 fmt_table, "dsp_false_alarms:",
393 le32_to_cpu(general->dsp_false_alarms),
394 accum_general->dsp_false_alarms,
395 delta_general->dsp_false_alarms,
396 max_general->dsp_false_alarms);
397 pos += scnprintf(buf + pos, bufsz - pos,
398 fmt_table, "beacon_rssi_a:",
399 le32_to_cpu(general->beacon_rssi_a),
400 accum_general->beacon_rssi_a,
401 delta_general->beacon_rssi_a,
402 max_general->beacon_rssi_a);
403 pos += scnprintf(buf + pos, bufsz - pos,
404 fmt_table, "beacon_rssi_b:",
405 le32_to_cpu(general->beacon_rssi_b),
406 accum_general->beacon_rssi_b,
407 delta_general->beacon_rssi_b,
408 max_general->beacon_rssi_b);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 fmt_table, "beacon_rssi_c:",
411 le32_to_cpu(general->beacon_rssi_c),
412 accum_general->beacon_rssi_c,
413 delta_general->beacon_rssi_c,
414 max_general->beacon_rssi_c);
415 pos += scnprintf(buf + pos, bufsz - pos,
416 fmt_table, "beacon_energy_a:",
417 le32_to_cpu(general->beacon_energy_a),
418 accum_general->beacon_energy_a,
419 delta_general->beacon_energy_a,
420 max_general->beacon_energy_a);
421 pos += scnprintf(buf + pos, bufsz - pos,
422 fmt_table, "beacon_energy_b:",
423 le32_to_cpu(general->beacon_energy_b),
424 accum_general->beacon_energy_b,
425 delta_general->beacon_energy_b,
426 max_general->beacon_energy_b);
427 pos += scnprintf(buf + pos, bufsz - pos,
428 fmt_table, "beacon_energy_c:",
429 le32_to_cpu(general->beacon_energy_c),
430 accum_general->beacon_energy_c,
431 delta_general->beacon_energy_c,
432 max_general->beacon_energy_c);
433
434 pos += scnprintf(buf + pos, bufsz - pos,
435 fmt_header, "Statistics_Rx - OFDM_HT:");
436 pos += scnprintf(buf + pos, bufsz - pos,
437 fmt_table, "plcp_err:",
438 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
439 delta_ht->plcp_err, max_ht->plcp_err);
440 pos += scnprintf(buf + pos, bufsz - pos,
441 fmt_table, "overrun_err:",
442 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
443 delta_ht->overrun_err, max_ht->overrun_err);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 fmt_table, "early_overrun_err:",
446 le32_to_cpu(ht->early_overrun_err),
447 accum_ht->early_overrun_err,
448 delta_ht->early_overrun_err,
449 max_ht->early_overrun_err);
450 pos += scnprintf(buf + pos, bufsz - pos,
451 fmt_table, "crc32_good:",
452 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
453 delta_ht->crc32_good, max_ht->crc32_good);
454 pos += scnprintf(buf + pos, bufsz - pos,
455 fmt_table, "crc32_err:",
456 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
457 delta_ht->crc32_err, max_ht->crc32_err);
458 pos += scnprintf(buf + pos, bufsz - pos,
459 fmt_table, "mh_format_err:",
460 le32_to_cpu(ht->mh_format_err),
461 accum_ht->mh_format_err,
462 delta_ht->mh_format_err, max_ht->mh_format_err);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 fmt_table, "agg_crc32_good:",
465 le32_to_cpu(ht->agg_crc32_good),
466 accum_ht->agg_crc32_good,
467 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
468 pos += scnprintf(buf + pos, bufsz - pos,
469 fmt_table, "agg_mpdu_cnt:",
470 le32_to_cpu(ht->agg_mpdu_cnt),
471 accum_ht->agg_mpdu_cnt,
472 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 fmt_table, "agg_cnt:",
475 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
476 delta_ht->agg_cnt, max_ht->agg_cnt);
477 pos += scnprintf(buf + pos, bufsz - pos,
478 fmt_table, "unsupport_mcs:",
479 le32_to_cpu(ht->unsupport_mcs),
480 accum_ht->unsupport_mcs,
481 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
482
483 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
484 kfree(buf);
485 return ret;
486}
487
488ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
489 char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct iwl_priv *priv = file->private_data;
493 int pos = 0;
494 char *buf;
495 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
496 ssize_t ret;
497 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
498
499 if (!iwl_legacy_is_alive(priv))
500 return -EAGAIN;
501
502 buf = kzalloc(bufsz, GFP_KERNEL);
503 if (!buf) {
504 IWL_ERR(priv, "Can not allocate Buffer\n");
505 return -ENOMEM;
506 }
507
508	/* The statistics shown here are based on the last statistics
509	 * notification from the uCode and might not reflect the current
510	 * uCode activity.
511 */
512 tx = &priv->_4965.statistics.tx;
513 accum_tx = &priv->_4965.accum_statistics.tx;
514 delta_tx = &priv->_4965.delta_statistics.tx;
515 max_tx = &priv->_4965.max_delta.tx;
516
517 pos += iwl4965_statistics_flag(priv, buf, bufsz);
518 pos += scnprintf(buf + pos, bufsz - pos,
519 fmt_header, "Statistics_Tx:");
520 pos += scnprintf(buf + pos, bufsz - pos,
521 fmt_table, "preamble:",
522 le32_to_cpu(tx->preamble_cnt),
523 accum_tx->preamble_cnt,
524 delta_tx->preamble_cnt, max_tx->preamble_cnt);
525 pos += scnprintf(buf + pos, bufsz - pos,
526 fmt_table, "rx_detected_cnt:",
527 le32_to_cpu(tx->rx_detected_cnt),
528 accum_tx->rx_detected_cnt,
529 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
530 pos += scnprintf(buf + pos, bufsz - pos,
531 fmt_table, "bt_prio_defer_cnt:",
532 le32_to_cpu(tx->bt_prio_defer_cnt),
533 accum_tx->bt_prio_defer_cnt,
534 delta_tx->bt_prio_defer_cnt,
535 max_tx->bt_prio_defer_cnt);
536 pos += scnprintf(buf + pos, bufsz - pos,
537 fmt_table, "bt_prio_kill_cnt:",
538 le32_to_cpu(tx->bt_prio_kill_cnt),
539 accum_tx->bt_prio_kill_cnt,
540 delta_tx->bt_prio_kill_cnt,
541 max_tx->bt_prio_kill_cnt);
542 pos += scnprintf(buf + pos, bufsz - pos,
543 fmt_table, "few_bytes_cnt:",
544 le32_to_cpu(tx->few_bytes_cnt),
545 accum_tx->few_bytes_cnt,
546 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
547 pos += scnprintf(buf + pos, bufsz - pos,
548 fmt_table, "cts_timeout:",
549 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
550 delta_tx->cts_timeout, max_tx->cts_timeout);
551 pos += scnprintf(buf + pos, bufsz - pos,
552 fmt_table, "ack_timeout:",
553 le32_to_cpu(tx->ack_timeout),
554 accum_tx->ack_timeout,
555 delta_tx->ack_timeout, max_tx->ack_timeout);
556 pos += scnprintf(buf + pos, bufsz - pos,
557 fmt_table, "expected_ack_cnt:",
558 le32_to_cpu(tx->expected_ack_cnt),
559 accum_tx->expected_ack_cnt,
560 delta_tx->expected_ack_cnt,
561 max_tx->expected_ack_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 fmt_table, "actual_ack_cnt:",
564 le32_to_cpu(tx->actual_ack_cnt),
565 accum_tx->actual_ack_cnt,
566 delta_tx->actual_ack_cnt,
567 max_tx->actual_ack_cnt);
568 pos += scnprintf(buf + pos, bufsz - pos,
569 fmt_table, "dump_msdu_cnt:",
570 le32_to_cpu(tx->dump_msdu_cnt),
571 accum_tx->dump_msdu_cnt,
572 delta_tx->dump_msdu_cnt,
573 max_tx->dump_msdu_cnt);
574 pos += scnprintf(buf + pos, bufsz - pos,
575 fmt_table, "abort_nxt_frame_mismatch:",
576 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
577 accum_tx->burst_abort_next_frame_mismatch_cnt,
578 delta_tx->burst_abort_next_frame_mismatch_cnt,
579 max_tx->burst_abort_next_frame_mismatch_cnt);
580 pos += scnprintf(buf + pos, bufsz - pos,
581 fmt_table, "abort_missing_nxt_frame:",
582 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
583 accum_tx->burst_abort_missing_next_frame_cnt,
584 delta_tx->burst_abort_missing_next_frame_cnt,
585 max_tx->burst_abort_missing_next_frame_cnt);
586 pos += scnprintf(buf + pos, bufsz - pos,
587 fmt_table, "cts_timeout_collision:",
588 le32_to_cpu(tx->cts_timeout_collision),
589 accum_tx->cts_timeout_collision,
590 delta_tx->cts_timeout_collision,
591 max_tx->cts_timeout_collision);
592 pos += scnprintf(buf + pos, bufsz - pos,
593 fmt_table, "ack_ba_timeout_collision:",
594 le32_to_cpu(tx->ack_or_ba_timeout_collision),
595 accum_tx->ack_or_ba_timeout_collision,
596 delta_tx->ack_or_ba_timeout_collision,
597 max_tx->ack_or_ba_timeout_collision);
598 pos += scnprintf(buf + pos, bufsz - pos,
599 fmt_table, "agg ba_timeout:",
600 le32_to_cpu(tx->agg.ba_timeout),
601 accum_tx->agg.ba_timeout,
602 delta_tx->agg.ba_timeout,
603 max_tx->agg.ba_timeout);
604 pos += scnprintf(buf + pos, bufsz - pos,
605 fmt_table, "agg ba_resched_frames:",
606 le32_to_cpu(tx->agg.ba_reschedule_frames),
607 accum_tx->agg.ba_reschedule_frames,
608 delta_tx->agg.ba_reschedule_frames,
609 max_tx->agg.ba_reschedule_frames);
610 pos += scnprintf(buf + pos, bufsz - pos,
611 fmt_table, "agg scd_query_agg_frame:",
612 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
613 accum_tx->agg.scd_query_agg_frame_cnt,
614 delta_tx->agg.scd_query_agg_frame_cnt,
615 max_tx->agg.scd_query_agg_frame_cnt);
616 pos += scnprintf(buf + pos, bufsz - pos,
617 fmt_table, "agg scd_query_no_agg:",
618 le32_to_cpu(tx->agg.scd_query_no_agg),
619 accum_tx->agg.scd_query_no_agg,
620 delta_tx->agg.scd_query_no_agg,
621 max_tx->agg.scd_query_no_agg);
622 pos += scnprintf(buf + pos, bufsz - pos,
623 fmt_table, "agg scd_query_agg:",
624 le32_to_cpu(tx->agg.scd_query_agg),
625 accum_tx->agg.scd_query_agg,
626 delta_tx->agg.scd_query_agg,
627 max_tx->agg.scd_query_agg);
628 pos += scnprintf(buf + pos, bufsz - pos,
629 fmt_table, "agg scd_query_mismatch:",
630 le32_to_cpu(tx->agg.scd_query_mismatch),
631 accum_tx->agg.scd_query_mismatch,
632 delta_tx->agg.scd_query_mismatch,
633 max_tx->agg.scd_query_mismatch);
634 pos += scnprintf(buf + pos, bufsz - pos,
635 fmt_table, "agg frame_not_ready:",
636 le32_to_cpu(tx->agg.frame_not_ready),
637 accum_tx->agg.frame_not_ready,
638 delta_tx->agg.frame_not_ready,
639 max_tx->agg.frame_not_ready);
640 pos += scnprintf(buf + pos, bufsz - pos,
641 fmt_table, "agg underrun:",
642 le32_to_cpu(tx->agg.underrun),
643 accum_tx->agg.underrun,
644 delta_tx->agg.underrun, max_tx->agg.underrun);
645 pos += scnprintf(buf + pos, bufsz - pos,
646 fmt_table, "agg bt_prio_kill:",
647 le32_to_cpu(tx->agg.bt_prio_kill),
648 accum_tx->agg.bt_prio_kill,
649 delta_tx->agg.bt_prio_kill,
650 max_tx->agg.bt_prio_kill);
651 pos += scnprintf(buf + pos, bufsz - pos,
652 fmt_table, "agg rx_ba_rsp_cnt:",
653 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
654 accum_tx->agg.rx_ba_rsp_cnt,
655 delta_tx->agg.rx_ba_rsp_cnt,
656 max_tx->agg.rx_ba_rsp_cnt);
657
658 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
659 kfree(buf);
660 return ret;
661}
662
663ssize_t
664iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
665 size_t count, loff_t *ppos)
666{
667 struct iwl_priv *priv = file->private_data;
668 int pos = 0;
669 char *buf;
670 int bufsz = sizeof(struct statistics_general) * 10 + 300;
671 ssize_t ret;
672 struct statistics_general_common *general, *accum_general;
673 struct statistics_general_common *delta_general, *max_general;
674 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
675 struct statistics_div *div, *accum_div, *delta_div, *max_div;
676
677 if (!iwl_legacy_is_alive(priv))
678 return -EAGAIN;
679
680 buf = kzalloc(bufsz, GFP_KERNEL);
681 if (!buf) {
682 IWL_ERR(priv, "Can not allocate Buffer\n");
683 return -ENOMEM;
684 }
685
686	/* The statistics shown here are based on the last statistics
687	 * notification from the uCode and might not reflect the current
688	 * uCode activity.
689 */
690 general = &priv->_4965.statistics.general.common;
691 dbg = &priv->_4965.statistics.general.common.dbg;
692 div = &priv->_4965.statistics.general.common.div;
693 accum_general = &priv->_4965.accum_statistics.general.common;
694 accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
695 accum_div = &priv->_4965.accum_statistics.general.common.div;
696 delta_general = &priv->_4965.delta_statistics.general.common;
697 max_general = &priv->_4965.max_delta.general.common;
698 delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
699 max_dbg = &priv->_4965.max_delta.general.common.dbg;
700 delta_div = &priv->_4965.delta_statistics.general.common.div;
701 max_div = &priv->_4965.max_delta.general.common.div;
702
703 pos += iwl4965_statistics_flag(priv, buf, bufsz);
704 pos += scnprintf(buf + pos, bufsz - pos,
705 fmt_header, "Statistics_General:");
706 pos += scnprintf(buf + pos, bufsz - pos,
707 fmt_value, "temperature:",
708 le32_to_cpu(general->temperature));
709 pos += scnprintf(buf + pos, bufsz - pos,
710 fmt_value, "ttl_timestamp:",
711 le32_to_cpu(general->ttl_timestamp));
712 pos += scnprintf(buf + pos, bufsz - pos,
713 fmt_table, "burst_check:",
714 le32_to_cpu(dbg->burst_check),
715 accum_dbg->burst_check,
716 delta_dbg->burst_check, max_dbg->burst_check);
717 pos += scnprintf(buf + pos, bufsz - pos,
718 fmt_table, "burst_count:",
719 le32_to_cpu(dbg->burst_count),
720 accum_dbg->burst_count,
721 delta_dbg->burst_count, max_dbg->burst_count);
722 pos += scnprintf(buf + pos, bufsz - pos,
723 fmt_table, "wait_for_silence_timeout_count:",
724 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
725 accum_dbg->wait_for_silence_timeout_cnt,
726 delta_dbg->wait_for_silence_timeout_cnt,
727 max_dbg->wait_for_silence_timeout_cnt);
728 pos += scnprintf(buf + pos, bufsz - pos,
729 fmt_table, "sleep_time:",
730 le32_to_cpu(general->sleep_time),
731 accum_general->sleep_time,
732 delta_general->sleep_time, max_general->sleep_time);
733 pos += scnprintf(buf + pos, bufsz - pos,
734 fmt_table, "slots_out:",
735 le32_to_cpu(general->slots_out),
736 accum_general->slots_out,
737 delta_general->slots_out, max_general->slots_out);
738 pos += scnprintf(buf + pos, bufsz - pos,
739 fmt_table, "slots_idle:",
740 le32_to_cpu(general->slots_idle),
741 accum_general->slots_idle,
742 delta_general->slots_idle, max_general->slots_idle);
743 pos += scnprintf(buf + pos, bufsz - pos,
744 fmt_table, "tx_on_a:",
745 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
746 delta_div->tx_on_a, max_div->tx_on_a);
747 pos += scnprintf(buf + pos, bufsz - pos,
748 fmt_table, "tx_on_b:",
749 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
750 delta_div->tx_on_b, max_div->tx_on_b);
751 pos += scnprintf(buf + pos, bufsz - pos,
752 fmt_table, "exec_time:",
753 le32_to_cpu(div->exec_time), accum_div->exec_time,
754 delta_div->exec_time, max_div->exec_time);
755 pos += scnprintf(buf + pos, bufsz - pos,
756 fmt_table, "probe_time:",
757 le32_to_cpu(div->probe_time), accum_div->probe_time,
758 delta_div->probe_time, max_div->probe_time);
759 pos += scnprintf(buf + pos, bufsz - pos,
760 fmt_table, "rx_enable_counter:",
761 le32_to_cpu(general->rx_enable_counter),
762 accum_general->rx_enable_counter,
763 delta_general->rx_enable_counter,
764 max_general->rx_enable_counter);
765 pos += scnprintf(buf + pos, bufsz - pos,
766 fmt_table, "num_of_sos_states:",
767 le32_to_cpu(general->num_of_sos_states),
768 accum_general->num_of_sos_states,
769 delta_general->num_of_sos_states,
770 max_general->num_of_sos_states);
771 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
772 kfree(buf);
773 return ret;
774}
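
All three readers above share one shape; a stripped-down sketch of it (the helper name and the single field printed are arbitrary):

	/* Illustrative only: the common pattern of the debugfs readers above. */
	static ssize_t sketch_stats_read(struct file *file, char __user *user_buf,
					 size_t count, loff_t *ppos)
	{
		struct iwl_priv *priv = file->private_data;
		int pos = 0, bufsz = 1024;
		char *buf;
		ssize_t ret;

		if (!iwl_legacy_is_alive(priv))
			return -EAGAIN;

		buf = kzalloc(bufsz, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		pos += iwl4965_statistics_flag(priv, buf, bufsz);
		pos += scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
				 le32_to_cpu(priv->_4965.statistics.general.common.temperature));

		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
		return ret;
	}
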
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
new file mode 100644
index 000000000000..6c8e35361a9e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
@@ -0,0 +1,59 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl4965_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count, loff_t *ppos);
40#else
41static ssize_t
42iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
43 size_t count, loff_t *ppos)
44{
45 return 0;
46}
47static ssize_t
48iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
49 size_t count, loff_t *ppos)
50{
51 return 0;
52}
53static ssize_t
54iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 return 0;
58}
59#endif
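
These prototypes are wired into debugfs elsewhere in the driver; a hedged sketch of what such a hookup can look like (the open helper and file name here are illustrative, not the driver's actual debugfs macros):

	/* Illustrative only: exposing one reader as a debugfs file. */
	static int sketch_stats_open(struct inode *inode, struct file *file)
	{
		file->private_data = inode->i_private;	/* struct iwl_priv * */
		return 0;
	}

	static const struct file_operations sketch_ucode_rx_stats_ops = {
		.read	= iwl4965_ucode_rx_stats_read,
		.open	= sketch_stats_open,
		.llseek	= generic_file_llseek,
	};

	/* debugfs_create_file("ucode_rx_stats", S_IRUSR, parent_dir, priv,
	 *		      &sketch_ucode_rx_stats_ops); */
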
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
new file mode 100644
index 000000000000..cb9baab1ff7d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
@@ -0,0 +1,154 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-4965.h"
76#include "iwl-io.h"
77
78/******************************************************************************
79 *
80 * EEPROM related functions
81 *
82******************************************************************************/
83
84/*
85 * The device's EEPROM semaphore prevents conflicts between driver and uCode
86 * when accessing the EEPROM; each access is a series of pulses to/from the
87 * EEPROM chip, not a single event, so even reads could conflict if they
88 * weren't arbitrated by the semaphore.
89 */
90int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
91{
92 u16 count;
93 int ret;
94
95 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
96 /* Request semaphore */
97 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
98 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
99
100 /* See if we got it */
101 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
102 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
103 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
104 EEPROM_SEM_TIMEOUT);
105 if (ret >= 0) {
106 IWL_DEBUG_IO(priv,
107 "Acquired semaphore after %d tries.\n",
108 count+1);
109 return ret;
110 }
111 }
112
113 return ret;
114}
115
116void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
117{
118 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
119 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
120
121}
122
123int iwl4965_eeprom_check_version(struct iwl_priv *priv)
124{
125 u16 eeprom_ver;
126 u16 calib_ver;
127
128 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
129 calib_ver = iwl_legacy_eeprom_query16(priv,
130 EEPROM_4965_CALIB_VERSION_OFFSET);
131
132 if (eeprom_ver < priv->cfg->eeprom_ver ||
133 calib_ver < priv->cfg->eeprom_calib_ver)
134 goto err;
135
136 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
137 eeprom_ver, calib_ver);
138
139 return 0;
140err:
141 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
142 "CALIB=0x%x < 0x%x\n",
143 eeprom_ver, priv->cfg->eeprom_ver,
144 calib_ver, priv->cfg->eeprom_calib_ver);
145 return -EINVAL;
146
147}
148
149void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
150{
151 const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
152 EEPROM_MAC_ADDRESS);
153 memcpy(mac, addr, ETH_ALEN);
154}
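
A hedged sketch of the intended calling pattern for the semaphore helpers above; the surrounding read loop is only indicated by a comment:

	/* Illustrative only: bracket EEPROM access with the semaphore helpers. */
	static int sketch_read_eeprom(struct iwl_priv *priv)
	{
		int ret = iwl4965_eeprom_acquire_semaphore(priv);

		if (ret < 0)
			return ret;	/* uCode kept the semaphore */

		/* ... perform the actual EEPROM word reads here ... */

		iwl4965_eeprom_release_semaphore(priv);
		return 0;
	}
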
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 9166794eda0d..08b189c8472d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -789,4 +789,26 @@ struct iwl4965_scd_bc_tbl {
 	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
 } __packed;
 
+
+#define IWL4965_RTC_INST_LOWER_BOUND		(0x000000)
+
+/* RSSI to dBm */
+#define IWL4965_RSSI_OFFSET	44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
+
+#define IWL4965_DEFAULT_TX_RETRY	15
+
+/* Limit range of txpower output target to be between these values */
+#define IWL4965_TX_POWER_TARGET_POWER_MIN	(0) /* 0 dBm: 1 milliwatt */
+
+/* EEPROM */
+#define IWL4965_FIRST_AMPDU_QUEUE	10
+
+
 #endif /* !__iwl_4965_hw_h__ */
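
The IWL4965_RSSI_OFFSET constant added above is the fixed offset the 4965 RX path subtracts when turning the hardware's RSSI/AGC readings into a dBm signal level. A rough sketch of how it is applied (the real computation, which combines the per-chain RSSI values and the AGC reading, lives in the RX code):

	/* Illustrative only: applying the RSSI-to-dBm offset. */
	static inline int sketch_rssi_to_dbm(int max_rssi, int agc)
	{
		return max_rssi - agc - IWL4965_RSSI_OFFSET;
	}
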
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
new file mode 100644
index 000000000000..26d324e30692
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
@@ -0,0 +1,74 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-4965-led.h"
45
46/* Send led command */
47static int
48iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
49{
50 struct iwl_host_cmd cmd = {
51 .id = REPLY_LEDS_CMD,
52 .len = sizeof(struct iwl_led_cmd),
53 .data = led_cmd,
54 .flags = CMD_ASYNC,
55 .callback = NULL,
56 };
57 u32 reg;
58
59 reg = iwl_read32(priv, CSR_LED_REG);
60 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
61 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
62
63 return iwl_legacy_send_cmd(priv, &cmd);
64}
65
66/* Set led register off */
67void iwl4965_led_enable(struct iwl_priv *priv)
68{
69 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
70}
71
72const struct iwl_led_ops iwl4965_led_ops = {
73 .cmd = iwl4965_send_led_cmd,
74};
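
A hedged sketch of driving the op table above; the field values are invented, and the real blink patterns come from the shared legacy LED code:

	/* Illustrative only: send a LED command through iwl4965_led_ops. */
	static int sketch_led_solid_on(struct iwl_priv *priv)
	{
		struct iwl_led_cmd led_cmd = {
			.id		= 2,			/* hypothetical LED id */
			.on		= 1,
			.off		= 0,
			.interval	= cpu_to_le32(1000),	/* assumed units */
		};

		return iwl4965_led_ops.cmd(priv, &led_cmd);
	}
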
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
new file mode 100644
index 000000000000..5ed3615fc338
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
@@ -0,0 +1,33 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_4965_led_h__
28#define __iwl_4965_led_h__
29
30extern const struct iwl_led_ops iwl4965_led_ops;
31void iwl4965_led_enable(struct iwl_priv *priv);
32
33#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
new file mode 100644
index 000000000000..c1a24946715e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -0,0 +1,1260 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-sta.h"
42
43void iwl4965_check_abort_status(struct iwl_priv *priv,
44 u8 frame_count, u32 status)
45{
46 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
47 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
48 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
49 queue_work(priv->workqueue, &priv->tx_flush);
50 }
51}
52
53/*
54 * EEPROM
55 */
56struct iwl_mod_params iwl4965_mod_params = {
57 .amsdu_size_8K = 1,
58 .restart_fw = 1,
59 /* the rest are 0 by default */
60};
61
62void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
63{
64 unsigned long flags;
65 int i;
66 spin_lock_irqsave(&rxq->lock, flags);
67 INIT_LIST_HEAD(&rxq->rx_free);
68 INIT_LIST_HEAD(&rxq->rx_used);
69 /* Fill the rx_used queue with _all_ of the Rx buffers */
70 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
71 /* In the reset function, these buffers may have been allocated
72 * to an SKB, so we need to unmap and free potential storage */
73 if (rxq->pool[i].page != NULL) {
74 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
75 PAGE_SIZE << priv->hw_params.rx_page_order,
76 PCI_DMA_FROMDEVICE);
77 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
78 rxq->pool[i].page = NULL;
79 }
80 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
81 }
82
83 for (i = 0; i < RX_QUEUE_SIZE; i++)
84 rxq->queue[i] = NULL;
85
86 /* Set us so that we have processed and used all buffers, but have
87 * not restocked the Rx queue with fresh buffers */
88 rxq->read = rxq->write = 0;
89 rxq->write_actual = 0;
90 rxq->free_count = 0;
91 spin_unlock_irqrestore(&rxq->lock, flags);
92}
93
94int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
95{
96 u32 rb_size;
97 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
98 u32 rb_timeout = 0;
99
100 if (priv->cfg->mod_params->amsdu_size_8K)
101 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
102 else
103 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
104
105 /* Stop Rx DMA */
106 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
107
108 /* Reset driver's Rx queue write index */
109 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
110
111 /* Tell device where to find RBD circular buffer in DRAM */
112 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
113 (u32)(rxq->bd_dma >> 8));
114
115 /* Tell device where in DRAM to update its Rx status */
116 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
117 rxq->rb_stts_dma >> 4);
118
119 /* Enable Rx DMA
120 * Direct rx interrupts to hosts
121 * Rx buffer size 4 or 8k
122 * RB timeout 0x10
123 * 256 RBDs
124 */
125 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
126 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
127 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
128 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
129 rb_size|
130 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
131 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
132
133 /* Set interrupt coalescing timer to default (2048 usecs) */
134 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
135
136 return 0;
137}
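
/*
 * Editor's note (illustrative, not part of the original file): the base
 * addresses programmed in iwl4965_rx_init() are shifted before being
 * written.  rxq->bd_dma >> 8 suggests the RBD circular buffer must sit
 * on a 256-byte boundary, and rxq->rb_stts_dma >> 4 suggests the Rx
 * status area must sit on a 16-byte boundary; in practice
 * dma_alloc_coherent() returns memory with at least this alignment, so
 * both constraints hold.
 */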
138
139static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
140{
141/*
142 * (for documentation purposes)
143 * to set power to V_AUX, do:
144
145 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
146 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
147 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
148 ~APMG_PS_CTRL_MSK_PWR_SRC);
149 */
150
151 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
152 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
153 ~APMG_PS_CTRL_MSK_PWR_SRC);
154}
155
156int iwl4965_hw_nic_init(struct iwl_priv *priv)
157{
158 unsigned long flags;
159 struct iwl_rx_queue *rxq = &priv->rxq;
160 int ret;
161
162 /* nic_init */
163 spin_lock_irqsave(&priv->lock, flags);
164 priv->cfg->ops->lib->apm_ops.init(priv);
165
166 /* Set interrupt coalescing calibration timer to default (512 usecs) */
167 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
168
169 spin_unlock_irqrestore(&priv->lock, flags);
170
171 iwl4965_set_pwr_vmain(priv);
172
173 priv->cfg->ops->lib->apm_ops.config(priv);
174
175 /* Allocate the RX queue, or reset if it is already allocated */
176 if (!rxq->bd) {
177 ret = iwl_legacy_rx_queue_alloc(priv);
178 if (ret) {
179 IWL_ERR(priv, "Unable to initialize Rx queue\n");
180 return -ENOMEM;
181 }
182 } else
183 iwl4965_rx_queue_reset(priv, rxq);
184
185 iwl4965_rx_replenish(priv);
186
187 iwl4965_rx_init(priv, rxq);
188
189 spin_lock_irqsave(&priv->lock, flags);
190
191 rxq->need_update = 1;
192 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
193
194 spin_unlock_irqrestore(&priv->lock, flags);
195
196 /* Allocate or reset and init all Tx and Command queues */
197 if (!priv->txq) {
198 ret = iwl4965_txq_ctx_alloc(priv);
199 if (ret)
200 return ret;
201 } else
202 iwl4965_txq_ctx_reset(priv);
203
204 set_bit(STATUS_INIT, &priv->status);
205
206 return 0;
207}
208
209/**
210 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
211 */
212static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
213 dma_addr_t dma_addr)
214{
215 return cpu_to_le32((u32)(dma_addr >> 8));
216}
217
218/**
219 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
220 *
221 * If there are slots in the RX queue that need to be restocked,
222 * and we have free pre-allocated buffers, fill the ranks as much
223 * as we can, pulling from rx_free.
224 *
225 * This moves the 'write' index forward to catch up with 'processed', and
226 * also updates the memory address in the firmware to reference the new
227 * target buffer.
228 */
229void iwl4965_rx_queue_restock(struct iwl_priv *priv)
230{
231 struct iwl_rx_queue *rxq = &priv->rxq;
232 struct list_head *element;
233 struct iwl_rx_mem_buffer *rxb;
234 unsigned long flags;
235
236 spin_lock_irqsave(&rxq->lock, flags);
237 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
238 /* The overwritten rxb must be a used one */
239 rxb = rxq->queue[rxq->write];
240 BUG_ON(rxb && rxb->page);
241
242 /* Get next free Rx buffer, remove from free list */
243 element = rxq->rx_free.next;
244 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
245 list_del(element);
246
247 /* Point to Rx buffer via next RBD in circular buffer */
248 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
249 rxb->page_dma);
250 rxq->queue[rxq->write] = rxb;
251 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
252 rxq->free_count--;
253 }
254 spin_unlock_irqrestore(&rxq->lock, flags);
255 /* If the pre-allocated buffer pool is dropping low, schedule to
256 * refill it */
257 if (rxq->free_count <= RX_LOW_WATERMARK)
258 queue_work(priv->workqueue, &priv->rx_replenish);
259
260
261 /* If we've added more space for the firmware to place data, tell it.
262 * Increment device's write pointer in multiples of 8. */
263 if (rxq->write_actual != (rxq->write & ~0x7)) {
264 spin_lock_irqsave(&rxq->lock, flags);
265 rxq->need_update = 1;
266 spin_unlock_irqrestore(&rxq->lock, flags);
267 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
268 }
269}
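
/*
 * Editor's note (illustrative, not part of the original file): both bit
 * tricks in this function rely on power-of-two sizing.  Assuming a
 * 256-entry ring (so RX_QUEUE_MASK == 255):
 *
 *	(255 + 1) & 255 == 0	-- rxq->write wraps back to slot 0
 *	13 & ~0x7 == 8		-- write & ~0x7 rounds down to a
 *	16 & ~0x7 == 16		   multiple of 8
 *
 * so the device's write pointer only moves in multiples of 8 RBDs,
 * which batches the register writes.
 */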
270
271/**
272 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
273 *
274 * When moving to rx_free an SKB is allocated for the slot.
275 *
276 * Also restock the Rx queue via iwl_rx_queue_restock.
277 * This is called as a scheduled work item (except for during initialization)
278 */
279static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
280{
281 struct iwl_rx_queue *rxq = &priv->rxq;
282 struct list_head *element;
283 struct iwl_rx_mem_buffer *rxb;
284 struct page *page;
285 unsigned long flags;
286 gfp_t gfp_mask = priority;
287
288 while (1) {
289 spin_lock_irqsave(&rxq->lock, flags);
290 if (list_empty(&rxq->rx_used)) {
291 spin_unlock_irqrestore(&rxq->lock, flags);
292 return;
293 }
294 spin_unlock_irqrestore(&rxq->lock, flags);
295
296 if (rxq->free_count > RX_LOW_WATERMARK)
297 gfp_mask |= __GFP_NOWARN;
298
299 if (priv->hw_params.rx_page_order > 0)
300 gfp_mask |= __GFP_COMP;
301
302 /* Alloc a new receive buffer */
303 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
304 if (!page) {
305 if (net_ratelimit())
306 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
307 "order: %d\n",
308 priv->hw_params.rx_page_order);
309
310 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
311 net_ratelimit())
312 IWL_CRIT(priv,
313 "Failed to alloc_pages with %s. "
314 "Only %u free buffers remaining.\n",
315 priority == GFP_ATOMIC ?
316 "GFP_ATOMIC" : "GFP_KERNEL",
317 rxq->free_count);
318 /* We don't reschedule replenish work here -- we will
319 * call the restock method and if it still needs
320 * more buffers it will schedule replenish */
321 return;
322 }
323
324 spin_lock_irqsave(&rxq->lock, flags);
325
326 if (list_empty(&rxq->rx_used)) {
327 spin_unlock_irqrestore(&rxq->lock, flags);
328 __free_pages(page, priv->hw_params.rx_page_order);
329 return;
330 }
331 element = rxq->rx_used.next;
332 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
333 list_del(element);
334
335 spin_unlock_irqrestore(&rxq->lock, flags);
336
337 BUG_ON(rxb->page);
338 rxb->page = page;
339 /* Get physical address of the RB */
340 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
341 PAGE_SIZE << priv->hw_params.rx_page_order,
342 PCI_DMA_FROMDEVICE);
343 /* dma address must be no more than 36 bits */
344 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
345 /* and also 256 byte aligned! */
346 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
347
348 spin_lock_irqsave(&rxq->lock, flags);
349
350 list_add_tail(&rxb->list, &rxq->rx_free);
351 rxq->free_count++;
352 priv->alloc_rxb_page++;
353
354 spin_unlock_irqrestore(&rxq->lock, flags);
355 }
356}
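
/*
 * Editor's note (illustrative): the two BUG_ON() checks above decode as
 * follows, given that DMA_BIT_MASK(n) is a mask with the low n bits set:
 *
 *	page_dma & ~DMA_BIT_MASK(36)	-- non-zero if any bit above bit 35
 *					   is set, i.e. the address does not
 *					   fit in 36 bits
 *	page_dma & DMA_BIT_MASK(8)	-- non-zero if any of the low 8 bits
 *					   is set, i.e. the address is not
 *					   256-byte aligned
 *
 * The alignment requirement matches the RBD format used by
 * iwl4965_dma_addr2rbd_ptr() above, which stores the address as
 * dma_addr >> 8 in a 32-bit word.
 */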
357
358void iwl4965_rx_replenish(struct iwl_priv *priv)
359{
360 unsigned long flags;
361
362 iwl4965_rx_allocate(priv, GFP_KERNEL);
363
364 spin_lock_irqsave(&priv->lock, flags);
365 iwl4965_rx_queue_restock(priv);
366 spin_unlock_irqrestore(&priv->lock, flags);
367}
368
369void iwl4965_rx_replenish_now(struct iwl_priv *priv)
370{
371 iwl4965_rx_allocate(priv, GFP_ATOMIC);
372
373 iwl4965_rx_queue_restock(priv);
374}
375
376/* Assumes that the page field of the buffers in 'pool' is kept accurate.
377 * If a page has been detached, the pool entry must have its page set to NULL.
378 * This free routine walks the list of pool entries and, if the page is
379 * non-NULL, unmaps and frees it.
380 */
381void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
382{
383 int i;
384 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
385 if (rxq->pool[i].page != NULL) {
386 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
387 PAGE_SIZE << priv->hw_params.rx_page_order,
388 PCI_DMA_FROMDEVICE);
389 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
390 rxq->pool[i].page = NULL;
391 }
392 }
393
394 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
395 rxq->bd_dma);
396 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
397 rxq->rb_stts, rxq->rb_stts_dma);
398 rxq->bd = NULL;
399 rxq->rb_stts = NULL;
400}
401
402int iwl4965_rxq_stop(struct iwl_priv *priv)
403{
404
405 /* stop Rx DMA */
406 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
407 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
408 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
409
410 return 0;
411}
412
413int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
414{
415 int idx = 0;
416 int band_offset = 0;
417
418 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
419 if (rate_n_flags & RATE_MCS_HT_MSK) {
420 idx = (rate_n_flags & 0xff);
421 return idx;
422 /* Legacy rate format, search for match in table */
423 } else {
424 if (band == IEEE80211_BAND_5GHZ)
425 band_offset = IWL_FIRST_OFDM_RATE;
426 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
427 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
428 return idx - band_offset;
429 }
430
431 return -1;
432}
433
434static int iwl4965_calc_rssi(struct iwl_priv *priv,
435 struct iwl_rx_phy_res *rx_resp)
436{
437 /* data from PHY/DSP regarding signal strength, etc.,
438 * contents are always there, not configurable by host. */
439 struct iwl4965_rx_non_cfg_phy *ncphy =
440 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
441 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
442 >> IWL49_AGC_DB_POS;
443
444 u32 valid_antennae =
445 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
446 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
447 u8 max_rssi = 0;
448 u32 i;
449
450 /* Find max rssi among 3 possible receivers.
451 * These values are measured by the digital signal processor (DSP).
452 * They should stay fairly constant even as the signal strength varies,
453 * if the radio's automatic gain control (AGC) is working right.
454 * AGC value (see below) will provide the "interesting" info. */
455 for (i = 0; i < 3; i++)
456 if (valid_antennae & (1 << i))
457 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
458
459 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
460 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
461 max_rssi, agc);
462
463 /* dBm = max_rssi dB - agc dB - constant.
464 * Higher AGC (higher radio gain) means lower signal. */
465 return max_rssi - agc - IWL4965_RSSI_OFFSET;
466}
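
/*
 * Editor's note (illustrative numbers only): suppose the DSP reports
 * rssi_info values of 55/50/48 dB for chains A/B/C and all three
 * antennae are flagged valid; max_rssi is then 55.  With an AGC reading
 * of 32 dB the function returns
 *
 *	55 - 32 - IWL4965_RSSI_OFFSET	(dBm)
 *
 * Higher AGC means the radio had to apply more gain, i.e. the incoming
 * signal was weaker, hence the subtraction.
 */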
467
468
469static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
470{
471 u32 decrypt_out = 0;
472
473 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
474 RX_RES_STATUS_STATION_FOUND)
475 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
476 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
477
478 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
479
480 /* packet was not encrypted */
481 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
482 RX_RES_STATUS_SEC_TYPE_NONE)
483 return decrypt_out;
484
485 /* packet was encrypted with unknown alg */
486 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
487 RX_RES_STATUS_SEC_TYPE_ERR)
488 return decrypt_out;
489
490 /* decryption was not done in HW */
491 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
492 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
493 return decrypt_out;
494
495 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
496
497 case RX_RES_STATUS_SEC_TYPE_CCMP:
498 /* alg is CCM: check MIC only */
499 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
500 /* Bad MIC */
501 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
502 else
503 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
504
505 break;
506
507 case RX_RES_STATUS_SEC_TYPE_TKIP:
508 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
509 /* Bad TTAK */
510 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
511 break;
512 }
513 /* fall through if TTAK OK */
514 default:
515 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
516 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
517 else
518 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
519 break;
520 }
521
522 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
523 decrypt_in, decrypt_out);
524
525 return decrypt_out;
526}
527
528static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
529 struct ieee80211_hdr *hdr,
530 u16 len,
531 u32 ampdu_status,
532 struct iwl_rx_mem_buffer *rxb,
533 struct ieee80211_rx_status *stats)
534{
535 struct sk_buff *skb;
536 __le16 fc = hdr->frame_control;
537
538 /* We only process data packets if the interface is open */
539 if (unlikely(!priv->is_open)) {
540 IWL_DEBUG_DROP_LIMIT(priv,
541 "Dropping packet while interface is not open.\n");
542 return;
543 }
544
545 /* In case of HW accelerated crypto and bad decryption, drop */
546 if (!priv->cfg->mod_params->sw_crypto &&
547 iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
548 return;
549
550 skb = dev_alloc_skb(128);
551 if (!skb) {
552 IWL_ERR(priv, "dev_alloc_skb failed\n");
553 return;
554 }
555
556 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
557
558 iwl_legacy_update_stats(priv, false, fc, len);
559 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
560
561 ieee80211_rx(priv->hw, skb);
562 priv->alloc_rxb_page--;
563 rxb->page = NULL;
564}
565
566/* Called for REPLY_RX (legacy ABG frames), or
567 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
568void iwl4965_rx_reply_rx(struct iwl_priv *priv,
569 struct iwl_rx_mem_buffer *rxb)
570{
571 struct ieee80211_hdr *header;
572 struct ieee80211_rx_status rx_status;
573 struct iwl_rx_packet *pkt = rxb_addr(rxb);
574 struct iwl_rx_phy_res *phy_res;
575 __le32 rx_pkt_status;
576 struct iwl_rx_mpdu_res_start *amsdu;
577 u32 len;
578 u32 ampdu_status;
579 u32 rate_n_flags;
580
581 /**
582 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
583 * REPLY_RX: physical layer info is in this buffer
584 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
585 * command and cached in priv->last_phy_res
586 *
587 * Here we set up local variables depending on which command is
588 * received.
589 */
590 if (pkt->hdr.cmd == REPLY_RX) {
591 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
592 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
593 + phy_res->cfg_phy_cnt);
594
595 len = le16_to_cpu(phy_res->byte_count);
596 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
597 phy_res->cfg_phy_cnt + len);
598 ampdu_status = le32_to_cpu(rx_pkt_status);
599 } else {
600 if (!priv->_4965.last_phy_res_valid) {
601 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
602 return;
603 }
604 phy_res = &priv->_4965.last_phy_res;
605 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
606 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
607 len = le16_to_cpu(amsdu->byte_count);
608 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
609 ampdu_status = iwl4965_translate_rx_status(priv,
610 le32_to_cpu(rx_pkt_status));
611 }
612
613 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
614		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
615 phy_res->cfg_phy_cnt);
616 return;
617 }
618
619 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
620 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
621 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
622 le32_to_cpu(rx_pkt_status));
623 return;
624 }
625
626 /* This will be used in several places later */
627 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
628
629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
631	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
632				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
633	rx_status.freq =
634		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
635							rx_status.band);
636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0;
639
640	/* The TSF isn't reliable. In order to allow a smooth user experience,
641	 * this workaround does not propagate it to mac80211. */
642 /*rx_status.flag |= RX_FLAG_TSFT;*/
643
644 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
645
646 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
647 rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
648
649 iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
650 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
651 rx_status.signal, (unsigned long long)rx_status.mactime);
652
653 /*
654 * "antenna number"
655 *
656 * It seems that the antenna field in the phy flags value
657 * is actually a bit field. This is undefined by radiotap,
658 * it wants an actual antenna number but I always get "7"
659 * for most legacy frames I receive indicating that the
660 * same frame was received on all three RX chains.
661 *
662 * I think this field should be removed in favor of a
663 * new 802.11n radiotap field "RX chains" that is defined
664 * as a bitmask.
665 */
666 rx_status.antenna =
667 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
668 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
669
670 /* set the preamble flag if appropriate */
671 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
672 rx_status.flag |= RX_FLAG_SHORTPRE;
673
674 /* Set up the HT phy flags */
675 if (rate_n_flags & RATE_MCS_HT_MSK)
676 rx_status.flag |= RX_FLAG_HT;
677 if (rate_n_flags & RATE_MCS_HT40_MSK)
678 rx_status.flag |= RX_FLAG_40MHZ;
679 if (rate_n_flags & RATE_MCS_SGI_MSK)
680 rx_status.flag |= RX_FLAG_SHORT_GI;
681
682 iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
683 rxb, &rx_status);
684}
685
686/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
687 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
688void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
689 struct iwl_rx_mem_buffer *rxb)
690{
691 struct iwl_rx_packet *pkt = rxb_addr(rxb);
692 priv->_4965.last_phy_res_valid = true;
693 memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
694 sizeof(struct iwl_rx_phy_res));
695}
696
697static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv,
698 struct ieee80211_vif *vif,
699 enum ieee80211_band band,
700 struct iwl_scan_channel *scan_ch)
701{
702 const struct ieee80211_supported_band *sband;
703 u16 passive_dwell = 0;
704 u16 active_dwell = 0;
705 int added = 0;
706 u16 channel = 0;
707
708 sband = iwl_get_hw_mode(priv, band);
709 if (!sband) {
710 IWL_ERR(priv, "invalid band\n");
711 return added;
712 }
713
714 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
715 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
716
717 if (passive_dwell <= active_dwell)
718 passive_dwell = active_dwell + 1;
719
720 channel = iwl_legacy_get_single_channel_number(priv, band);
721 if (channel) {
722 scan_ch->channel = cpu_to_le16(channel);
723 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
724 scan_ch->active_dwell = cpu_to_le16(active_dwell);
725 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
726 /* Set txpower levels to defaults */
727 scan_ch->dsp_atten = 110;
728 if (band == IEEE80211_BAND_5GHZ)
729 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
730 else
731 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
732 added++;
733 } else
734 IWL_ERR(priv, "no valid channel found\n");
735 return added;
736}
737
738static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
739 struct ieee80211_vif *vif,
740 enum ieee80211_band band,
741 u8 is_active, u8 n_probes,
742 struct iwl_scan_channel *scan_ch)
743{
744 struct ieee80211_channel *chan;
745 const struct ieee80211_supported_band *sband;
746 const struct iwl_channel_info *ch_info;
747 u16 passive_dwell = 0;
748 u16 active_dwell = 0;
749 int added, i;
750 u16 channel;
751
752 sband = iwl_get_hw_mode(priv, band);
753 if (!sband)
754 return 0;
755
756 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
757 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
758
759 if (passive_dwell <= active_dwell)
760 passive_dwell = active_dwell + 1;
761
762 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
763 chan = priv->scan_request->channels[i];
764
765 if (chan->band != band)
766 continue;
767
768 channel = chan->hw_value;
769 scan_ch->channel = cpu_to_le16(channel);
770
771 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
772 if (!iwl_legacy_is_channel_valid(ch_info)) {
773 IWL_DEBUG_SCAN(priv,
774 "Channel %d is INVALID for this band.\n",
775 channel);
776 continue;
777 }
778
779 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
780 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
781 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
782 else
783 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
784
785 if (n_probes)
786 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
787
788 scan_ch->active_dwell = cpu_to_le16(active_dwell);
789 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
790
791 /* Set txpower levels to defaults */
792 scan_ch->dsp_atten = 110;
793
794 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
795 * power level:
796 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
797 */
798 if (band == IEEE80211_BAND_5GHZ)
799 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
800 else
801 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
802
803 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
804 channel, le32_to_cpu(scan_ch->type),
805 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
806 "ACTIVE" : "PASSIVE",
807 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
808 active_dwell : passive_dwell);
809
810 scan_ch++;
811 added++;
812 }
813
814 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
815 return added;
816}
817
818int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
819{
820 struct iwl_host_cmd cmd = {
821 .id = REPLY_SCAN_CMD,
822 .len = sizeof(struct iwl_scan_cmd),
823 .flags = CMD_SIZE_HUGE,
824 };
825 struct iwl_scan_cmd *scan;
826 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
827 u32 rate_flags = 0;
828 u16 cmd_len;
829 u16 rx_chain = 0;
830 enum ieee80211_band band;
831 u8 n_probes = 0;
832 u8 rx_ant = priv->hw_params.valid_rx_ant;
833 u8 rate;
834 bool is_active = false;
835 int chan_mod;
836 u8 active_chains;
837 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
838 int ret;
839
840 lockdep_assert_held(&priv->mutex);
841
842 if (vif)
843 ctx = iwl_legacy_rxon_ctx_from_vif(vif);
844
845 if (!priv->scan_cmd) {
846 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
847 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
848 if (!priv->scan_cmd) {
849 IWL_DEBUG_SCAN(priv,
850 "fail to allocate memory for scan\n");
851 return -ENOMEM;
852 }
853 }
854 scan = priv->scan_cmd;
855 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
856
857 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
858 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
859
860 if (iwl_legacy_is_any_associated(priv)) {
861 u16 interval = 0;
862 u32 extra;
863 u32 suspend_time = 100;
864 u32 scan_suspend_time = 100;
865
866 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
867 if (priv->is_internal_short_scan)
868 interval = 0;
869 else
870 interval = vif->bss_conf.beacon_int;
871
872 scan->suspend_time = 0;
873 scan->max_out_time = cpu_to_le32(200 * 1024);
874 if (!interval)
875 interval = suspend_time;
876
877 extra = (suspend_time / interval) << 22;
878 scan_suspend_time = (extra |
879 ((suspend_time % interval) * 1024));
880 scan->suspend_time = cpu_to_le32(scan_suspend_time);
881 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
882 scan_suspend_time, interval);
883 }
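
	/*
	 * Editor's note (worked example, not part of the original file):
	 * with suspend_time = 100 and a beacon interval of 100 TU, the
	 * encoding above packs the number of whole beacon intervals into
	 * the top bits and the remainder, converted to microseconds
	 * (1 TU = 1024 usec), into the low 22 bits:
	 *
	 *	extra             = (100 / 100) << 22         = 0x00400000
	 *	scan_suspend_time = extra | (100 % 100) * 1024 = 0x00400000
	 *
	 * which is the value reported by the IWL_DEBUG_SCAN() line above.
	 */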
884
885 if (priv->is_internal_short_scan) {
886 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
887 } else if (priv->scan_request->n_ssids) {
888 int i, p = 0;
889 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
890 for (i = 0; i < priv->scan_request->n_ssids; i++) {
891 /* always does wildcard anyway */
892 if (!priv->scan_request->ssids[i].ssid_len)
893 continue;
894 scan->direct_scan[p].id = WLAN_EID_SSID;
895 scan->direct_scan[p].len =
896 priv->scan_request->ssids[i].ssid_len;
897 memcpy(scan->direct_scan[p].ssid,
898 priv->scan_request->ssids[i].ssid,
899 priv->scan_request->ssids[i].ssid_len);
900 n_probes++;
901 p++;
902 }
903 is_active = true;
904 } else
905 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
906
907 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
908 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
909 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
910
911 switch (priv->scan_band) {
912 case IEEE80211_BAND_2GHZ:
913 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
914 chan_mod = le32_to_cpu(
915 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
916 RXON_FLG_CHANNEL_MODE_MSK)
917 >> RXON_FLG_CHANNEL_MODE_POS;
918 if (chan_mod == CHANNEL_MODE_PURE_40) {
919 rate = IWL_RATE_6M_PLCP;
920 } else {
921 rate = IWL_RATE_1M_PLCP;
922 rate_flags = RATE_MCS_CCK_MSK;
923 }
924 break;
925 case IEEE80211_BAND_5GHZ:
926 rate = IWL_RATE_6M_PLCP;
927 break;
928 default:
929 IWL_WARN(priv, "Invalid scan band\n");
930 return -EIO;
931 }
932
933 /*
934 * If active scanning is requested but a certain channel is
935 * marked passive, we can do active scanning if we detect
936 * transmissions.
937 *
938 * There is an issue with some firmware versions that triggers
939 * a sysassert on a "good CRC threshold" of zero (== disabled),
940 * on a radar channel even though this means that we should NOT
941 * send probes.
942 *
943 * The "good CRC threshold" is the number of frames that we
944 * need to receive during our dwell time on a channel before
945 * sending out probes -- setting this to a huge value will
946 * mean we never reach it, but at the same time work around
947 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
948 * here instead of IWL_GOOD_CRC_TH_DISABLED.
949 */
950 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
951 IWL_GOOD_CRC_TH_NEVER;
952
953 band = priv->scan_band;
954
955 if (priv->cfg->scan_rx_antennas[band])
956 rx_ant = priv->cfg->scan_rx_antennas[band];
957
958 if (priv->cfg->scan_tx_antennas[band])
959 scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
960
961 priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
962 priv->scan_tx_ant[band],
963 scan_tx_antennas);
964 rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
965 scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
966
967 /* In power save mode use one chain, otherwise use all chains */
968 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
969 /* rx_ant has been set to all valid chains previously */
970 active_chains = rx_ant &
971 ((u8)(priv->chain_noise_data.active_chains));
972 if (!active_chains)
973 active_chains = rx_ant;
974
975 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
976 priv->chain_noise_data.active_chains);
977
978 rx_ant = iwl4965_first_antenna(active_chains);
979 }
980
981 /* MIMO is not used here, but value is required */
982 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
983 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
984 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
985 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
986 scan->rx_chain = cpu_to_le16(rx_chain);
987 if (!priv->is_internal_short_scan) {
988 cmd_len = iwl_legacy_fill_probe_req(priv,
989 (struct ieee80211_mgmt *)scan->data,
990 vif->addr,
991 priv->scan_request->ie,
992 priv->scan_request->ie_len,
993 IWL_MAX_SCAN_SIZE - sizeof(*scan));
994 } else {
995 /* use bcast addr, will not be transmitted but must be valid */
996 cmd_len = iwl_legacy_fill_probe_req(priv,
997 (struct ieee80211_mgmt *)scan->data,
998 iwl_bcast_addr, NULL, 0,
999 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1000
1001 }
1002 scan->tx_cmd.len = cpu_to_le16(cmd_len);
1003
1004 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
1005 RXON_FILTER_BCON_AWARE_MSK);
1006
1007 if (priv->is_internal_short_scan) {
1008 scan->channel_count =
1009 iwl4965_get_single_channel_for_scan(priv, vif, band,
1010 (void *)&scan->data[le16_to_cpu(
1011 scan->tx_cmd.len)]);
1012 } else {
1013 scan->channel_count =
1014 iwl4965_get_channels_for_scan(priv, vif, band,
1015 is_active, n_probes,
1016 (void *)&scan->data[le16_to_cpu(
1017 scan->tx_cmd.len)]);
1018 }
1019 if (scan->channel_count == 0) {
1020 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1021 return -EIO;
1022 }
1023
1024 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
1025 scan->channel_count * sizeof(struct iwl_scan_channel);
1026 cmd.data = scan;
1027 scan->len = cpu_to_le16(cmd.len);
1028
1029 set_bit(STATUS_SCAN_HW, &priv->status);
1030
1031 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
1032 if (ret)
1033 clear_bit(STATUS_SCAN_HW, &priv->status);
1034
1035 return ret;
1036}
1037
1038int iwl4965_manage_ibss_station(struct iwl_priv *priv,
1039 struct ieee80211_vif *vif, bool add)
1040{
1041 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1042
1043 if (add)
1044 return iwl4965_add_bssid_station(priv, vif_priv->ctx,
1045 vif->bss_conf.bssid,
1046 &vif_priv->ibss_bssid_sta_id);
1047 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1048 vif->bss_conf.bssid);
1049}
1050
1051void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
1052 int sta_id, int tid, int freed)
1053{
1054 lockdep_assert_held(&priv->sta_lock);
1055
1056 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1057 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1058 else {
1059 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1060 priv->stations[sta_id].tid[tid].tfds_in_queue,
1061 freed);
1062 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1063 }
1064}
1065
1066#define IWL_TX_QUEUE_MSK 0xfffff
1067
1068static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
1069{
1070 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1071 priv->current_ht_config.single_chain_sufficient;
1072}
1073
1074#define IWL_NUM_RX_CHAINS_MULTIPLE 3
1075#define IWL_NUM_RX_CHAINS_SINGLE 2
1076#define IWL_NUM_IDLE_CHAINS_DUAL 2
1077#define IWL_NUM_IDLE_CHAINS_SINGLE 1
1078
1079/*
1080 * Determine how many receiver/antenna chains to use.
1081 *
1082 * More provides better reception via diversity. Fewer saves power
1083 * at the expense of throughput, but only when not in powersave to
1084 * start with.
1085 *
1086 * MIMO (dual stream) requires at least 2, but works better with 3.
1087 * This does not determine *which* chains to use, just how many.
1088 */
1089static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
1090{
1091 /* # of Rx chains to use when expecting MIMO. */
1092 if (iwl4965_is_single_rx_stream(priv))
1093 return IWL_NUM_RX_CHAINS_SINGLE;
1094 else
1095 return IWL_NUM_RX_CHAINS_MULTIPLE;
1096}
1097
1098/*
1099 * When we are in power saving mode, unless the device supports spatial
1100 * multiplexing power save, use the active count for the rx chain count.
1101 */
1102static int
1103iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1104{
1105 /* # Rx chains when idling, depending on SMPS mode */
1106 switch (priv->current_ht_config.smps) {
1107 case IEEE80211_SMPS_STATIC:
1108 case IEEE80211_SMPS_DYNAMIC:
1109 return IWL_NUM_IDLE_CHAINS_SINGLE;
1110 case IEEE80211_SMPS_OFF:
1111 return active_cnt;
1112 default:
1113 WARN(1, "invalid SMPS mode %d",
1114 priv->current_ht_config.smps);
1115 return active_cnt;
1116 }
1117}
1118
1119/* up to 4 chains */
1120static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
1121{
1122 u8 res;
1123 res = (chain_bitmap & BIT(0)) >> 0;
1124 res += (chain_bitmap & BIT(1)) >> 1;
1125 res += (chain_bitmap & BIT(2)) >> 2;
1126 res += (chain_bitmap & BIT(3)) >> 3;
1127 return res;
1128}
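
/*
 * Editor's note (illustrative): this is simply a population count of the
 * low four bits, e.g. chain_bitmap = 0x5 (BIT(0) | BIT(2)) yields
 * 1 + 0 + 1 + 0 = 2 connected chains.  hweight8(chain_bitmap & 0xf)
 * would compute the same result.
 */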
1129
1130/**
1131 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1132 *
1133 * Selects how many and which Rx receivers/antennas/chains to use.
1134 * This should not be used for scan command ... it puts data in wrong place.
1135 */
1136void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1137{
1138 bool is_single = iwl4965_is_single_rx_stream(priv);
1139 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
1140 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1141 u32 active_chains;
1142 u16 rx_chain;
1143
1144 /* Tell uCode which antennas are actually connected.
1145 * Before first association, we assume all antennas are connected.
1146 * Just after first association, iwl4965_chain_noise_calibration()
1147 * checks which antennas actually *are* connected. */
1148 if (priv->chain_noise_data.active_chains)
1149 active_chains = priv->chain_noise_data.active_chains;
1150 else
1151 active_chains = priv->hw_params.valid_rx_ant;
1152
1153 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1154
1155 /* How many receivers should we use? */
1156 active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
1157 idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
1158
1159
1160	/* correct rx chain count according to hw settings
1161 * and chain noise calibration
1162 */
1163 valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
1164 if (valid_rx_cnt < active_rx_cnt)
1165 active_rx_cnt = valid_rx_cnt;
1166
1167 if (valid_rx_cnt < idle_rx_cnt)
1168 idle_rx_cnt = valid_rx_cnt;
1169
1170 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1171 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1172
1173 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
1174
1175 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
1176 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1177 else
1178 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1179
1180 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
1181 ctx->staging.rx_chain,
1182 active_rx_cnt, idle_rx_cnt);
1183
1184 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1185 active_rx_cnt < idle_rx_cnt);
1186}
1187
1188u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1189{
1190 int i;
1191 u8 ind = ant;
1192
1193 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1194 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1195 if (valid & BIT(ind))
1196 return ind;
1197 }
1198 return ant;
1199}
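
/*
 * Editor's note (illustrative): the loop tries each of the other antenna
 * indices in turn, wrapping past the last one, and falls back to the
 * current antenna if none is in the valid mask.  For example, with
 * ant = 0 and valid = BIT(0) | BIT(1), the first candidate is index 1,
 * BIT(1) is set in valid, so 1 is returned; repeated calls therefore
 * alternate the scan Tx antenna between 0 and 1.
 */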
1200
1201static const char *iwl4965_get_fh_string(int cmd)
1202{
1203 switch (cmd) {
1204 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1205 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1206 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
1207 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1208 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1209 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1210 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1211 IWL_CMD(FH_TSSR_TX_STATUS_REG);
1212 IWL_CMD(FH_TSSR_TX_ERROR_REG);
1213 default:
1214 return "UNKNOWN";
1215 }
1216}
1217
1218int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
1219{
1220 int i;
1221#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1222 int pos = 0;
1223 size_t bufsz = 0;
1224#endif
1225 static const u32 fh_tbl[] = {
1226 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1227 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1228 FH_RSCSR_CHNL0_WPTR,
1229 FH_MEM_RCSR_CHNL0_CONFIG_REG,
1230 FH_MEM_RSSR_SHARED_CTRL_REG,
1231 FH_MEM_RSSR_RX_STATUS_REG,
1232 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1233 FH_TSSR_TX_STATUS_REG,
1234 FH_TSSR_TX_ERROR_REG
1235 };
1236#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1237 if (display) {
1238 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1239 *buf = kmalloc(bufsz, GFP_KERNEL);
1240 if (!*buf)
1241 return -ENOMEM;
1242 pos += scnprintf(*buf + pos, bufsz - pos,
1243 "FH register values:\n");
1244 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1245 pos += scnprintf(*buf + pos, bufsz - pos,
1246 " %34s: 0X%08x\n",
1247 iwl4965_get_fh_string(fh_tbl[i]),
1248 iwl_legacy_read_direct32(priv, fh_tbl[i]));
1249 }
1250 return pos;
1251 }
1252#endif
1253 IWL_ERR(priv, "FH register values:\n");
1254 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1255 IWL_ERR(priv, " %34s: 0X%08x\n",
1256 iwl4965_get_fh_string(fh_tbl[i]),
1257 iwl_legacy_read_direct32(priv, fh_tbl[i]));
1258 }
1259 return 0;
1260}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
new file mode 100644
index 000000000000..69abd2816f8d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -0,0 +1,2870 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <linux/wireless.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "iwl-dev.h"
40#include "iwl-sta.h"
41#include "iwl-core.h"
42#include "iwl-4965.h"
43
44#define IWL4965_RS_NAME "iwl-4965-rs"
45
46#define NUM_TRY_BEFORE_ANT_TOGGLE 1
47#define IWL_NUMBER_TRY 1
48#define IWL_HT_NUMBER_TRY 3
49
50#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
51#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
52#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
53
54/* max allowed rate miss before sync LQ cmd */
55#define IWL_MISSED_RATE_MAX 15
56/* max time to accumulate history, 3 seconds */
57#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
58
59static u8 rs_ht_to_legacy[] = {
60 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
61 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
62 IWL_RATE_6M_INDEX,
63 IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
64 IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
65 IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
66 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
67};
68
69static const u8 ant_toggle_lookup[] = {
70 /*ANT_NONE -> */ ANT_NONE,
71 /*ANT_A -> */ ANT_B,
72 /*ANT_B -> */ ANT_C,
73 /*ANT_AB -> */ ANT_BC,
74 /*ANT_C -> */ ANT_A,
75 /*ANT_AC -> */ ANT_AB,
76 /*ANT_BC -> */ ANT_AC,
77 /*ANT_ABC -> */ ANT_ABC,
78};
79
80#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
81 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
82 IWL_RATE_SISO_##s##M_PLCP, \
83 IWL_RATE_MIMO2_##s##M_PLCP,\
84 IWL_RATE_##r##M_IEEE, \
85 IWL_RATE_##ip##M_INDEX, \
86 IWL_RATE_##in##M_INDEX, \
87 IWL_RATE_##rp##M_INDEX, \
88 IWL_RATE_##rn##M_INDEX, \
89 IWL_RATE_##pp##M_INDEX, \
90 IWL_RATE_##np##M_INDEX }
91
92/*
93 * Parameter order:
94 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
95 *
96 * If there isn't a valid next or previous rate then INV is used which
97 * maps to IWL_RATE_INVALID
98 *
99 */
100const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
101 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
102 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
103 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
104 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
105 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
106 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
107 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
108 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
109 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
110 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
111 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
112 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
113 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
114};
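
/*
 * Editor's note (illustrative): expanding one entry by hand shows what
 * the IWL_DECLARE_RATE_INFO() macro above produces; the prev/next
 * annotations follow the parameter-order comment and are an
 * interpretation, not taken from the original file.  The 6 Mbps line,
 * IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), expands to:
 *
 *	[IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP,
 *				IWL_RATE_SISO_6M_PLCP,
 *				IWL_RATE_MIMO2_6M_PLCP,
 *				IWL_RATE_6M_IEEE,
 *				IWL_RATE_5M_INDEX,  IWL_RATE_9M_INDEX,
 *				IWL_RATE_5M_INDEX,  IWL_RATE_11M_INDEX,
 *				IWL_RATE_5M_INDEX,  IWL_RATE_11M_INDEX }
 *
 * i.e. from 6 Mbps the prev/next pointers step down to 5.5 Mbps or up
 * to 9/11 Mbps, depending on which of the three prev/next pairs is
 * consulted.
 */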
115
116static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
117{
118 int idx = 0;
119
120 /* HT rate format */
121 if (rate_n_flags & RATE_MCS_HT_MSK) {
122 idx = (rate_n_flags & 0xff);
123
124 if (idx >= IWL_RATE_MIMO2_6M_PLCP)
125 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
126
127 idx += IWL_FIRST_OFDM_RATE;
128		/* skip 9M, which is not supported in HT */
129 if (idx >= IWL_RATE_9M_INDEX)
130 idx += 1;
131 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
132 return idx;
133
134 /* legacy rate format, search for match in table */
135 } else {
136 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
137 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
138 return idx;
139 }
140
141 return -1;
142}
143
144static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
145 struct sk_buff *skb,
146 struct ieee80211_sta *sta,
147 struct iwl_lq_sta *lq_sta);
148static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
149 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
150static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
151 bool force_search);
152
153#ifdef CONFIG_MAC80211_DEBUGFS
154static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
155 u32 *rate_n_flags, int index);
156#else
157static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
158 u32 *rate_n_flags, int index)
159{}
160#endif
161
162/**
163 * The following tables contain the expected throughput metrics for all rates
164 *
165 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
166 *
167 * where invalid entries are zeros.
168 *
169 * CCK rates are only valid in legacy table and will only be used in G
170 * (2.4 GHz) band.
171 */
172
173static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
174 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
175};
176
177static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
178 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
179 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
180 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
181 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
182};
183
184static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
185 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
186 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
187 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
188 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
189};
190
191static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
192 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
193 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
194 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
195 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
196};
197
198static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
199 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
200 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
201 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
202 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
203};
204
205/* mbps, mcs */
206static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
207 { "1", "BPSK DSSS"},
208 { "2", "QPSK DSSS"},
209 {"5.5", "BPSK CCK"},
210 { "11", "QPSK CCK"},
211 { "6", "BPSK 1/2"},
212 { "9", "BPSK 1/2"},
213 { "12", "QPSK 1/2"},
214 { "18", "QPSK 3/4"},
215 { "24", "16QAM 1/2"},
216 { "36", "16QAM 3/4"},
217 { "48", "64QAM 2/3"},
218 { "54", "64QAM 3/4"},
219 { "60", "64QAM 5/6"},
220};
221
222#define MCS_INDEX_PER_STREAM (8)
223
224static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
225{
226 return (u8)(rate_n_flags & 0xFF);
227}
228
229static void
230iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
231{
232 window->data = 0;
233 window->success_counter = 0;
234 window->success_ratio = IWL_INVALID_VALUE;
235 window->counter = 0;
236 window->average_tpt = IWL_INVALID_VALUE;
237 window->stamp = 0;
238}
239
240static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
241{
242 return (ant_type & valid_antenna) == ant_type;
243}
244
245/*
246 *	Remove old data from the statistics.  All data older than
247 *	TID_MAX_TIME_DIFF is deleted.
248 */
249static void
250iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
251{
252 /* The oldest age we want to keep */
253 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
254
255 while (tl->queue_count &&
256 (tl->time_stamp < oldest_time)) {
257 tl->total -= tl->packet_count[tl->head];
258 tl->packet_count[tl->head] = 0;
259 tl->time_stamp += TID_QUEUE_CELL_SPACING;
260 tl->queue_count--;
261 tl->head++;
262 if (tl->head >= TID_QUEUE_MAX_SIZE)
263 tl->head = 0;
264 }
265}
266
267/*
268 *	Increment the traffic load value for this tid, and also remove
269 *	any old values that have aged beyond the allowed time period.
270 */
271static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
272 struct ieee80211_hdr *hdr)
273{
274 u32 curr_time = jiffies_to_msecs(jiffies);
275 u32 time_diff;
276 s32 index;
277 struct iwl_traffic_load *tl = NULL;
278 u8 tid;
279
280 if (ieee80211_is_data_qos(hdr->frame_control)) {
281 u8 *qc = ieee80211_get_qos_ctl(hdr);
282 tid = qc[0] & 0xf;
283 } else
284 return MAX_TID_COUNT;
285
286 if (unlikely(tid >= TID_MAX_LOAD_COUNT))
287 return MAX_TID_COUNT;
288
289 tl = &lq_data->load[tid];
290
291 curr_time -= curr_time % TID_ROUND_VALUE;
292
293 /* Happens only for the first packet. Initialize the data */
294 if (!(tl->queue_count)) {
295 tl->total = 1;
296 tl->time_stamp = curr_time;
297 tl->queue_count = 1;
298 tl->head = 0;
299 tl->packet_count[0] = 1;
300 return MAX_TID_COUNT;
301 }
302
303 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
304 index = time_diff / TID_QUEUE_CELL_SPACING;
305
306 /* The history is too long: remove data that is older than */
307 /* TID_MAX_TIME_DIFF */
308 if (index >= TID_QUEUE_MAX_SIZE)
309 iwl4965_rs_tl_rm_old_stats(tl, curr_time);
310
311 index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
312 tl->packet_count[index] = tl->packet_count[index] + 1;
313 tl->total = tl->total + 1;
314
315 if ((index + 1) > tl->queue_count)
316 tl->queue_count = index + 1;
317
318 return tid;
319}
320
321/*
322 *	Get the traffic load value for tid.
323 */
324static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
325{
326 u32 curr_time = jiffies_to_msecs(jiffies);
327 u32 time_diff;
328 s32 index;
329 struct iwl_traffic_load *tl = NULL;
330
331 if (tid >= TID_MAX_LOAD_COUNT)
332 return 0;
333
334 tl = &(lq_data->load[tid]);
335
336 curr_time -= curr_time % TID_ROUND_VALUE;
337
338 if (!(tl->queue_count))
339 return 0;
340
341 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
342 index = time_diff / TID_QUEUE_CELL_SPACING;
343
344 /* The history is too long: remove data that is older than */
345 /* TID_MAX_TIME_DIFF */
346 if (index >= TID_QUEUE_MAX_SIZE)
347 iwl4965_rs_tl_rm_old_stats(tl, curr_time);
348
349 return tl->total;
350}
351
352static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
353 struct iwl_lq_sta *lq_data, u8 tid,
354 struct ieee80211_sta *sta)
355{
356 int ret = -EAGAIN;
357 u32 load;
358
359 load = iwl4965_rs_tl_get_load(lq_data, tid);
360
361 if (load > IWL_AGG_LOAD_THRESHOLD) {
362 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
363 sta->addr, tid);
364 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
365 if (ret == -EAGAIN) {
366 /*
367			 * driver and mac80211 are out of sync;
368			 * this might be caused by reloading firmware,
369			 * so stop the Tx BA session here
370 */
371 IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
372 tid);
373 ieee80211_stop_tx_ba_session(sta, tid);
374 }
375 } else {
376 IWL_ERR(priv, "Aggregation not enabled for tid %d "
377 "because load = %u\n", tid, load);
378 }
379 return ret;
380}
381
382static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
383 struct iwl_lq_sta *lq_data,
384 struct ieee80211_sta *sta)
385{
386 if (tid < TID_MAX_LOAD_COUNT)
387 iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
388 else
389 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
390 tid, TID_MAX_LOAD_COUNT);
391}
392
393static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
394{
395 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
397 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
398}
399
400/*
401 * Get the expected throughput entry for this rate index from an
402 * iwl_scale_tbl_info, guarding against a missing expected_tpt table.
403 */
404static s32
405iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
406{
407 if (tbl->expected_tpt)
408 return tbl->expected_tpt[rs_index];
409 return 0;
410}
411
412/**
413 * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
414 *
415 * We keep a sliding window of the last 62 packets transmitted
416 * at this rate. window->data contains the bitmask of successful
417 * packets.
418 */
419static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
420 int scale_index, int attempts, int successes)
421{
422 struct iwl_rate_scale_data *window = NULL;
423 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
424 s32 fail_count, tpt;
425
426 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
427 return -EINVAL;
428
429 /* Select window for current tx bit rate */
430 window = &(tbl->win[scale_index]);
431
432 /* Get expected throughput */
433 tpt = iwl4965_get_expected_tpt(tbl, scale_index);
434
435 /*
436 * Keep track of only the latest 62 tx frame attempts in this rate's
437 * history window; anything older isn't really relevant any more.
438 * If we have filled up the sliding window, drop the oldest attempt;
439 * if the oldest attempt (highest bit in bitmap) shows "success",
440 * subtract "1" from the success counter (this is the main reason
441 * we keep these bitmaps!).
442 */
443 while (attempts > 0) {
444 if (window->counter >= IWL_RATE_MAX_WINDOW) {
445
446 /* remove earliest */
447 window->counter = IWL_RATE_MAX_WINDOW - 1;
448
449 if (window->data & mask) {
450 window->data &= ~mask;
451 window->success_counter--;
452 }
453 }
454
455 /* Increment frames-attempted counter */
456 window->counter++;
457
458 /* Shift bitmap by one frame to throw away oldest history */
459 window->data <<= 1;
460
461 /* Mark the most recent #successes attempts as successful */
462 if (successes > 0) {
463 window->success_counter++;
464 window->data |= 0x1;
465 successes--;
466 }
467
468 attempts--;
469 }
470
471 /* Calculate current success ratio, avoid divide-by-0! */
472 if (window->counter > 0)
473 window->success_ratio = 128 * (100 * window->success_counter)
474 / window->counter;
475 else
476 window->success_ratio = IWL_INVALID_VALUE;
477
478 fail_count = window->counter - window->success_counter;
479
480 /* Calculate average throughput, if we have enough history. */
481 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
482 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
483 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
484 else
485 window->average_tpt = IWL_INVALID_VALUE;
486
487 /* Tag this window as having been updated */
488 window->stamp = jiffies;
489
490 return 0;
491}
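
/*
 * Editor's note (worked example): the window statistics are kept in
 * fixed point to avoid floating point in the kernel.  With a full
 * window of counter = 62 attempts and success_counter = 31:
 *
 *	success_ratio = 128 * (100 * 31) / 62 = 6400
 *
 * i.e. the success percentage scaled by 128 (50% -> 6400).  With an
 * expected throughput of tpt = 40 for the current rate:
 *
 *	average_tpt = (6400 * 40 + 64) / 128 = 2000
 *
 * so average_tpt is the expected throughput weighted by the success
 * percentage and therefore carries an extra factor of 100 relative to
 * the raw expected_tpt tables; the "+ 64" rounds instead of truncating.
 */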
492
493/*
494 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
495 */
496static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
497 struct iwl_scale_tbl_info *tbl,
498 int index, u8 use_green)
499{
500 u32 rate_n_flags = 0;
501
502 if (is_legacy(tbl->lq_type)) {
503 rate_n_flags = iwl_rates[index].plcp;
504 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
505 rate_n_flags |= RATE_MCS_CCK_MSK;
506
507 } else if (is_Ht(tbl->lq_type)) {
508 if (index > IWL_LAST_OFDM_RATE) {
509 IWL_ERR(priv, "Invalid HT rate index %d\n", index);
510 index = IWL_LAST_OFDM_RATE;
511 }
512 rate_n_flags = RATE_MCS_HT_MSK;
513
514 if (is_siso(tbl->lq_type))
515 rate_n_flags |= iwl_rates[index].plcp_siso;
516 else
517 rate_n_flags |= iwl_rates[index].plcp_mimo2;
518 } else {
519 IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
520 }
521
522 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
523 RATE_MCS_ANT_ABC_MSK);
524
525 if (is_Ht(tbl->lq_type)) {
526 if (tbl->is_ht40) {
527 if (tbl->is_dup)
528 rate_n_flags |= RATE_MCS_DUP_MSK;
529 else
530 rate_n_flags |= RATE_MCS_HT40_MSK;
531 }
532 if (tbl->is_SGI)
533 rate_n_flags |= RATE_MCS_SGI_MSK;
534
535 if (use_green) {
536 rate_n_flags |= RATE_MCS_GF_MSK;
537 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
538 rate_n_flags &= ~RATE_MCS_SGI_MSK;
539 IWL_ERR(priv, "GF was set with SGI:SISO\n");
540 }
541 }
542 }
543 return rate_n_flags;
544}
545
546/*
547 * Interpret uCode API's rate_n_flags format,
548 * fill "search" or "active" tx mode table.
549 */
550static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
551 enum ieee80211_band band,
552 struct iwl_scale_tbl_info *tbl,
553 int *rate_idx)
554{
555 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
556 u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
557 u8 mcs;
558
559 memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
560 *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
561
562 if (*rate_idx == IWL_RATE_INVALID) {
563 *rate_idx = -1;
564 return -EINVAL;
565 }
566 tbl->is_SGI = 0; /* default legacy setup */
567 tbl->is_ht40 = 0;
568 tbl->is_dup = 0;
569 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
570 tbl->lq_type = LQ_NONE;
571 tbl->max_search = IWL_MAX_SEARCH;
572
573 /* legacy rate format */
574 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
575 if (iwl4965_num_of_ant == 1) {
576 if (band == IEEE80211_BAND_5GHZ)
577 tbl->lq_type = LQ_A;
578 else
579 tbl->lq_type = LQ_G;
580 }
581 /* HT rate format */
582 } else {
583 if (rate_n_flags & RATE_MCS_SGI_MSK)
584 tbl->is_SGI = 1;
585
586 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
587 (rate_n_flags & RATE_MCS_DUP_MSK))
588 tbl->is_ht40 = 1;
589
590 if (rate_n_flags & RATE_MCS_DUP_MSK)
591 tbl->is_dup = 1;
592
593 mcs = iwl4965_rs_extract_rate(rate_n_flags);
594
595 /* SISO */
596 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
597 if (iwl4965_num_of_ant == 1)
598 tbl->lq_type = LQ_SISO; /*else NONE*/
599 /* MIMO2 */
600 } else {
601 if (iwl4965_num_of_ant == 2)
602 tbl->lq_type = LQ_MIMO2;
603 }
604 }
605 return 0;
606}
607
608/* switch to another antenna/antennas and return 1 */
609/* if no other valid antenna found, return 0 */
610static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
611 struct iwl_scale_tbl_info *tbl)
612{
613 u8 new_ant_type;
614
615 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
616 return 0;
617
618 if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
619 return 0;
620
621 new_ant_type = ant_toggle_lookup[tbl->ant_type];
622
623 while ((new_ant_type != tbl->ant_type) &&
624 !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
625 new_ant_type = ant_toggle_lookup[new_ant_type];
626
627 if (new_ant_type == tbl->ant_type)
628 return 0;
629
630 tbl->ant_type = new_ant_type;
631 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
632 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
633 return 1;
634}
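
/*
 * Editor's note (illustrative): the ant_toggle_lookup[] walk above hops
 * through antenna configurations until it lands on one contained in
 * valid_ant.  For example, with valid_ant = ANT_A | ANT_C and a current
 * ant_type of ANT_A: the first candidate is ANT_B, which is not in the
 * valid mask, so the loop advances to ANT_C, which is; the table entry
 * and the antenna bits of *rate_n_flags are then rewritten for ANT_C
 * and the function returns 1.
 */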
635
636/**
637 * Green-field mode is valid if the station supports it and
638 * there are no non-GF stations present in the BSS.
639 */
640static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
641{
642 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
643 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
644
645 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
646 !(ctx->ht.non_gf_sta_present);
647}
648
649/**
650 * iwl4965_rs_get_supported_rates - get the available rates
651 *
652 * If this is a management or broadcast frame, only the basic
653 * available rates are returned.
654 *
655 */
656static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
657 struct ieee80211_hdr *hdr,
658 enum iwl_table_type rate_type)
659{
660 if (is_legacy(rate_type)) {
661 return lq_sta->active_legacy_rate;
662 } else {
663 if (is_siso(rate_type))
664 return lq_sta->active_siso_rate;
665 else
666 return lq_sta->active_mimo2_rate;
667 }
668}
669
670static u16
671iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
672 int rate_type)
673{
674 u8 high = IWL_RATE_INVALID;
675 u8 low = IWL_RATE_INVALID;
676
677	/* For 802.11a or HT, walk to the next literally adjacent rate in
678	 * the rate table */
679 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
680 int i;
681 u32 mask;
682
683 /* Find the previous rate that is in the rate mask */
684 i = index - 1;
685 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
686 if (rate_mask & mask) {
687 low = i;
688 break;
689 }
690 }
691
692 /* Find the next rate that is in the rate mask */
693 i = index + 1;
694 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
695 if (rate_mask & mask) {
696 high = i;
697 break;
698 }
699 }
700
701 return (high << 8) | low;
702 }
703
704 low = index;
705 while (low != IWL_RATE_INVALID) {
706 low = iwl_rates[low].prev_rs;
707 if (low == IWL_RATE_INVALID)
708 break;
709 if (rate_mask & (1 << low))
710 break;
711 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
712 }
713
714 high = index;
715 while (high != IWL_RATE_INVALID) {
716 high = iwl_rates[high].next_rs;
717 if (high == IWL_RATE_INVALID)
718 break;
719 if (rate_mask & (1 << high))
720 break;
721 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
722 }
723
724 return (high << 8) | low;
725}
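/*
 * Illustrative sketch (not part of the driver): the helper above returns
 * two 8-bit rate indexes packed into a single u16 -- next-higher rate in
 * the upper byte, next-lower rate in the lower byte -- and every caller
 * unpacks it the same way.  Hypothetical helper names below.
 */
static unsigned short ex_pack_high_low(unsigned char high, unsigned char low)
{
	return (unsigned short)((high << 8) | low);
}

static unsigned char ex_unpack_low(unsigned short high_low)
{
	return high_low & 0xff;
}

static unsigned char ex_unpack_high(unsigned short high_low)
{
	return (high_low >> 8) & 0xff;
}
/*
 * Usage mirrors the callers below:
 *	high_low = ex_pack_high_low(high, low);
 *	low  = ex_unpack_low(high_low);
 *	high = ex_unpack_high(high_low);
 */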
726
727static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
728 struct iwl_scale_tbl_info *tbl,
729 u8 scale_index, u8 ht_possible)
730{
731 s32 low;
732 u16 rate_mask;
733 u16 high_low;
734 u8 switch_to_legacy = 0;
735 u8 is_green = lq_sta->is_green;
736 struct iwl_priv *priv = lq_sta->drv;
737
738 /* check if we need to switch from HT to legacy rates.
739 * assumption is that mandatory rates (1Mbps or 6Mbps)
740 * are always supported (spec requirement) */
741 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
742 switch_to_legacy = 1;
743 scale_index = rs_ht_to_legacy[scale_index];
744 if (lq_sta->band == IEEE80211_BAND_5GHZ)
745 tbl->lq_type = LQ_A;
746 else
747 tbl->lq_type = LQ_G;
748
749 if (iwl4965_num_of_ant(tbl->ant_type) > 1)
750 tbl->ant_type =
751 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
752
753 tbl->is_ht40 = 0;
754 tbl->is_SGI = 0;
755 tbl->max_search = IWL_MAX_SEARCH;
756 }
757
758 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
759
760 /* Mask with station rate restriction */
761 if (is_legacy(tbl->lq_type)) {
762 /* supp_rates has no CCK bits in A mode */
763 if (lq_sta->band == IEEE80211_BAND_5GHZ)
764 rate_mask = (u16)(rate_mask &
765 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
766 else
767 rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
768 }
769
770 /* If we switched from HT to legacy, check current rate */
771 if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
772 low = scale_index;
773 goto out;
774 }
775
776 high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
777 scale_index, rate_mask,
778 tbl->lq_type);
779 low = high_low & 0xff;
780
781 if (low == IWL_RATE_INVALID)
782 low = scale_index;
783
784out:
785 return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
786}
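/*
 * Illustrative sketch (not part of the driver): on 5 GHz the mac80211
 * supp_rates bitmap starts at the first OFDM rate, while the driver's
 * legacy rate mask also reserves bit positions for the CCK rates, so the
 * station bitmap is shifted up before the two are ANDed, as in the
 * function above.  The constant is a hypothetical stand-in for
 * IWL_FIRST_OFDM_RATE.
 */
#define EX_FIRST_OFDM_RATE 4	/* assume 4 CCK rates precede the OFDM block */

static unsigned short ex_restrict_legacy_mask(unsigned short rate_mask,
					      unsigned short supp_rates,
					      int is_5ghz)
{
	if (is_5ghz)	/* supp_rates has no CCK bits in A mode */
		return rate_mask & (unsigned short)(supp_rates << EX_FIRST_OFDM_RATE);
	return rate_mask & supp_rates;
}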
787
788/*
789 * Simple function to compare two rate scale table types
790 */
791static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
792 struct iwl_scale_tbl_info *b)
793{
794 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
795 (a->is_SGI == b->is_SGI);
796}
797
798/*
799 * mac80211 sends us Tx status
800 */
801static void
802iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
803 struct ieee80211_sta *sta, void *priv_sta,
804 struct sk_buff *skb)
805{
806 int legacy_success;
807 int retries;
808 int rs_index, mac_index, i;
809 struct iwl_lq_sta *lq_sta = priv_sta;
810 struct iwl_link_quality_cmd *table;
811 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
812 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
813 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
814 enum mac80211_rate_control_flags mac_flags;
815 u32 tx_rate;
816 struct iwl_scale_tbl_info tbl_type;
817 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
818 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
819 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
820
821 IWL_DEBUG_RATE_LIMIT(priv,
822 "get frame ack response, update rate scale window\n");
823
824 /* Treat uninitialized rate scaling data same as non-existing. */
825 if (!lq_sta) {
826 IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
827 return;
828 } else if (!lq_sta->drv) {
829 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
830 return;
831 }
832
833 if (!ieee80211_is_data(hdr->frame_control) ||
834 info->flags & IEEE80211_TX_CTL_NO_ACK)
835 return;
836
837 /* This packet was aggregated but doesn't carry status info */
838 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
839 !(info->flags & IEEE80211_TX_STAT_AMPDU))
840 return;
841
842 /*
843 * Ignore this Tx frame response if its initial rate doesn't match
844 * that of latest Link Quality command. There may be stragglers
845 * from a previous Link Quality command, but we're no longer interested
846 * in those; they're either from the "active" mode while we're trying
847 * to check "search" mode, or a prior "search" mode after we've moved
848 * to a new "search" mode (which might become the new "active" mode).
849 */
850 table = &lq_sta->lq;
851 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
852 iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
853 priv->band, &tbl_type, &rs_index);
854 if (priv->band == IEEE80211_BAND_5GHZ)
855 rs_index -= IWL_FIRST_OFDM_RATE;
856 mac_flags = info->status.rates[0].flags;
857 mac_index = info->status.rates[0].idx;
858 /* For HT packets, map MCS to PLCP */
859 if (mac_flags & IEEE80211_TX_RC_MCS) {
860 mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
861 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
862 mac_index++;
863 /*
864 * mac80211 HT index is always zero-indexed; we need to move
865 * HT OFDM rates after CCK rates in 2.4 GHz band
866 */
867 if (priv->band == IEEE80211_BAND_2GHZ)
868 mac_index += IWL_FIRST_OFDM_RATE;
869 }
870 /* Here we actually compare this rate to the latest LQ command */
871 if ((mac_index < 0) ||
872 (tbl_type.is_SGI !=
873 !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
874 (tbl_type.is_ht40 !=
875 !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
876 (tbl_type.is_dup !=
877 !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
878 (tbl_type.ant_type != info->antenna_sel_tx) ||
879 (!!(tx_rate & RATE_MCS_HT_MSK) !=
880 !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
881 (!!(tx_rate & RATE_MCS_GF_MSK) !=
882 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
883 (rs_index != mac_index)) {
884 IWL_DEBUG_RATE(priv,
885 "initial rate %d does not match %d (0x%x)\n",
886 mac_index, rs_index, tx_rate);
887 /*
888 * Since rates mis-match, the last LQ command may have failed.
889 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
890 * the driver.
891 */
892 lq_sta->missed_rate_counter++;
893 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
894 lq_sta->missed_rate_counter = 0;
895 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
896 CMD_ASYNC, false);
897 }
898 /* Regardless, ignore this status info for outdated rate */
899 return;
900 } else
901 /* Rate did match, so reset the missed_rate_counter */
902 lq_sta->missed_rate_counter = 0;
903
904 /* Figure out if rate scale algorithm is in active or search table */
905 if (iwl4965_table_type_matches(&tbl_type,
906 &(lq_sta->lq_info[lq_sta->active_tbl]))) {
907 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
908 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
909 } else if (iwl4965_table_type_matches(&tbl_type,
910 &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
911 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
912 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
913 } else {
914 IWL_DEBUG_RATE(priv,
915 "Neither active nor search matches tx rate\n");
916 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
917 IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
918 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
919 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
920 IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
921 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
922 IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
923 tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
924 /*
925 * no matching table found, let's by-pass the data collection
926 * and continue to perform rate scale to find the rate table
927 */
928 iwl4965_rs_stay_in_table(lq_sta, true);
929 goto done;
930 }
931
932 /*
933 * Updating the frame history depends on whether packets were
934 * aggregated.
935 *
936 * For aggregation, all packets were transmitted at the same rate, the
937 * first index into rate scale table.
938 */
939 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
940 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
941 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
942 &rs_index);
943 iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
944 info->status.ampdu_len,
945 info->status.ampdu_ack_len);
946
947 /* Update success/fail counts if not searching for new mode */
948 if (lq_sta->stay_in_tbl) {
949 lq_sta->total_success += info->status.ampdu_ack_len;
950 lq_sta->total_failed += (info->status.ampdu_len -
951 info->status.ampdu_ack_len);
952 }
953 } else {
954 /*
955 * For legacy, update the frame history for each Tx retry.
956 */
957 retries = info->status.rates[0].count - 1;
958 /* HW doesn't send more than 15 retries */
959 retries = min(retries, 15);
960
961 /* The last transmission may have been successful */
962 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
963 /* Collect data for each rate used during failed TX attempts */
964 for (i = 0; i <= retries; ++i) {
965 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
966 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
967 &tbl_type, &rs_index);
968 /*
969 * Only collect stats if retried rate is in the same RS
970 * table as active/search.
971 */
972 if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
973 tmp_tbl = curr_tbl;
974 else if (iwl4965_table_type_matches(&tbl_type,
975 other_tbl))
976 tmp_tbl = other_tbl;
977 else
978 continue;
979 iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
980 i < retries ? 0 : legacy_success);
981 }
982
983 /* Update success/fail counts if not searching for new mode */
984 if (lq_sta->stay_in_tbl) {
985 lq_sta->total_success += legacy_success;
986 lq_sta->total_failed += retries + (1 - legacy_success);
987 }
988 }
989 /* The last TX rate is cached in lq_sta; it's set in if/else above */
990 lq_sta->last_rate_n_flags = tx_rate;
991done:
992 /* See if there's a better rate or modulation mode to try. */
993 if (sta && sta->supp_rates[sband->band])
994 iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
995}
996
997/*
998 * Begin a period of staying with a selected modulation mode.
999 * Set "stay_in_tbl" flag to prevent any mode switches.
1000 * Set frame tx success limits according to legacy vs. high-throughput,
1001 * and reset overall (spanning all rates) tx success history statistics.
1002 * These control how long we stay using same modulation mode before
1003 * searching for a new mode.
1004 */
1005static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1006 struct iwl_lq_sta *lq_sta)
1007{
1008 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1009 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1010 if (is_legacy) {
1011 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1012 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1013 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1014 } else {
1015 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1016 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1017 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1018 }
1019 lq_sta->table_count = 0;
1020 lq_sta->total_failed = 0;
1021 lq_sta->total_success = 0;
1022 lq_sta->flush_timer = jiffies;
1023 lq_sta->action_counter = 0;
1024}
1025
1026/*
1027 * Find correct throughput table for given mode of modulation
1028 */
1029static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1030 struct iwl_scale_tbl_info *tbl)
1031{
1032 /* Used to choose among HT tables */
1033 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1034
1035 /* Check for invalid LQ type */
1036 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1037 tbl->expected_tpt = expected_tpt_legacy;
1038 return;
1039 }
1040
1041 /* Legacy rates have only one table */
1042 if (is_legacy(tbl->lq_type)) {
1043 tbl->expected_tpt = expected_tpt_legacy;
1044 return;
1045 }
1046
1047 /* Choose among many HT tables depending on number of streams
1048 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1049 * status */
1050 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1051 ht_tbl_pointer = expected_tpt_siso20MHz;
1052 else if (is_siso(tbl->lq_type))
1053 ht_tbl_pointer = expected_tpt_siso40MHz;
1054 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1055 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1056 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1057 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1058
1059 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1060 tbl->expected_tpt = ht_tbl_pointer[0];
1061 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1062 tbl->expected_tpt = ht_tbl_pointer[1];
1063 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1064 tbl->expected_tpt = ht_tbl_pointer[2];
1065 else /* AGG+SGI */
1066 tbl->expected_tpt = ht_tbl_pointer[3];
1067}
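/*
 * Illustrative sketch (not part of the driver): each HT throughput table
 * above holds four rows -- normal, SGI, aggregation, and AGG+SGI -- and
 * the pair of flags picks exactly one of them.  The same selection can
 * be written as a row index (hypothetical helper):
 */
static int ex_tpt_row(int is_sgi, int is_agg)
{
	/* rows: 0 = normal, 1 = SGI, 2 = AGG, 3 = AGG+SGI */
	return (is_agg ? 2 : 0) + (is_sgi ? 1 : 0);
}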
1068
1069/*
1070 * Find starting rate for new "search" high-throughput mode of modulation.
1071 * Goal is to find lowest expected rate (under perfect conditions) that is
1072 * above the current measured throughput of "active" mode, to give new mode
1073 * a fair chance to prove itself without too many challenges.
1074 *
1075 * This gets called when transitioning to more aggressive modulation
1076 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1077 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1078 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1079 * bit rate will typically need to increase, but not if performance was bad.
1080 */
1081static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
1082 struct iwl_lq_sta *lq_sta,
1083 struct iwl_scale_tbl_info *tbl, /* "search" */
1084 u16 rate_mask, s8 index)
1085{
1086 /* "active" values */
1087 struct iwl_scale_tbl_info *active_tbl =
1088 &(lq_sta->lq_info[lq_sta->active_tbl]);
1089 s32 active_sr = active_tbl->win[index].success_ratio;
1090 s32 active_tpt = active_tbl->expected_tpt[index];
1091
1092 /* expected "search" throughput */
1093 s32 *tpt_tbl = tbl->expected_tpt;
1094
1095 s32 new_rate, high, low, start_hi;
1096 u16 high_low;
1097 s8 rate = index;
1098
1099 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1100
1101 for (; ;) {
1102 high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
1103 tbl->lq_type);
1104
1105 low = high_low & 0xff;
1106 high = (high_low >> 8) & 0xff;
1107
1108 /*
1109 * Lower the "search" bit rate, to give new "search" mode
1110 * approximately the same throughput as "active" if:
1111 *
1112 * 1) "Active" mode has been working modestly well (but not
1113 * great), and expected "search" throughput (under perfect
1114 * conditions) at candidate rate is above the actual
1115 * measured "active" throughput (but less than expected
1116 * "active" throughput under perfect conditions).
1117 * OR
1118 * 2) "Active" mode has been working perfectly or very well
1119 * and expected "search" throughput (under perfect
1120 * conditions) at candidate rate is above expected
1121 * "active" throughput (under perfect conditions).
1122 */
1123 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1124 ((active_sr > IWL_RATE_DECREASE_TH) &&
1125 (active_sr <= IWL_RATE_HIGH_TH) &&
1126 (tpt_tbl[rate] <= active_tpt))) ||
1127 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1128 (tpt_tbl[rate] > active_tpt))) {
1129
1130 /* (2nd or later pass)
1131 * If we've already tried to raise the rate, and are
1132 * now trying to lower it, use the higher rate. */
1133 if (start_hi != IWL_RATE_INVALID) {
1134 new_rate = start_hi;
1135 break;
1136 }
1137
1138 new_rate = rate;
1139
1140 /* Loop again with lower rate */
1141 if (low != IWL_RATE_INVALID)
1142 rate = low;
1143
1144 /* Lower rate not available, use the original */
1145 else
1146 break;
1147
1148 /* Else try to raise the "search" rate to match "active" */
1149 } else {
1150 /* (2nd or later pass)
1151 * If we've already tried to lower the rate, and are
1152 * now trying to raise it, use the lower rate. */
1153 if (new_rate != IWL_RATE_INVALID)
1154 break;
1155
1156 /* Loop again with higher rate */
1157 else if (high != IWL_RATE_INVALID) {
1158 start_hi = high;
1159 rate = high;
1160
1161 /* Higher rate not available, use the original */
1162 } else {
1163 new_rate = rate;
1164 break;
1165 }
1166 }
1167 }
1168
1169 return new_rate;
1170}
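/*
 * Illustrative sketch (not part of the driver): stripped to its core,
 * the search above looks for the lowest candidate rate whose expected
 * (ideal-conditions) throughput still exceeds a measured target.  The
 * simplified scan below over a plain throughput array captures that
 * idea; it deliberately ignores the success-ratio refinements of the
 * real routine.
 */
static int ex_lowest_rate_above(const int *expected_tpt, int n_rates,
				int measured_tpt)
{
	int i;

	for (i = 0; i < n_rates; i++)
		if (expected_tpt[i] > measured_tpt)
			return i;
	return n_rates - 1;	/* nothing better: fall back to the top entry */
}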
1171
1172/*
1173 * Set up search table for MIMO2
1174 */
1175static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
1176 struct iwl_lq_sta *lq_sta,
1177 struct ieee80211_conf *conf,
1178 struct ieee80211_sta *sta,
1179 struct iwl_scale_tbl_info *tbl, int index)
1180{
1181 u16 rate_mask;
1182 s32 rate;
1183 s8 is_green = lq_sta->is_green;
1184 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1185 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1186
1187 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1188 return -1;
1189
1190 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1191 == WLAN_HT_CAP_SM_PS_STATIC)
1192 return -1;
1193
1194 /* Need both Tx chains/antennas to support MIMO */
1195 if (priv->hw_params.tx_chains_num < 2)
1196 return -1;
1197
1198 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1199
1200 tbl->lq_type = LQ_MIMO2;
1201 tbl->is_dup = lq_sta->is_dup;
1202 tbl->action = 0;
1203 tbl->max_search = IWL_MAX_SEARCH;
1204 rate_mask = lq_sta->active_mimo2_rate;
1205
1206 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1207 tbl->is_ht40 = 1;
1208 else
1209 tbl->is_ht40 = 0;
1210
1211 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1212
1213 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1214
1215 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
1216 rate, rate_mask);
1217 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1218 IWL_DEBUG_RATE(priv,
1219 "Can't switch with index %d rate mask %x\n",
1220 rate, rate_mask);
1221 return -1;
1222 }
1223 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1224 tbl, rate, is_green);
1225
1226 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1227 tbl->current_rate, is_green);
1228 return 0;
1229}
1230
1231/*
1232 * Set up search table for SISO
1233 */
1234static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
1235 struct iwl_lq_sta *lq_sta,
1236 struct ieee80211_conf *conf,
1237 struct ieee80211_sta *sta,
1238 struct iwl_scale_tbl_info *tbl, int index)
1239{
1240 u16 rate_mask;
1241 u8 is_green = lq_sta->is_green;
1242 s32 rate;
1243 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1244 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1245
1246 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1247 return -1;
1248
1249 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1250
1251 tbl->is_dup = lq_sta->is_dup;
1252 tbl->lq_type = LQ_SISO;
1253 tbl->action = 0;
1254 tbl->max_search = IWL_MAX_SEARCH;
1255 rate_mask = lq_sta->active_siso_rate;
1256
1257 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1258 tbl->is_ht40 = 1;
1259 else
1260 tbl->is_ht40 = 0;
1261
1262 if (is_green)
1263 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1264
1265 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1266 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1267
1268 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1269 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1270 IWL_DEBUG_RATE(priv,
1271 "can not switch with index %d rate mask %x\n",
1272 rate, rate_mask);
1273 return -1;
1274 }
1275 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1276 tbl, rate, is_green);
1277 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1278 tbl->current_rate, is_green);
1279 return 0;
1280}
1281
1282/*
1283 * Try to switch to new modulation mode from legacy
1284 */
1285static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
1286 struct iwl_lq_sta *lq_sta,
1287 struct ieee80211_conf *conf,
1288 struct ieee80211_sta *sta,
1289 int index)
1290{
1291 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1292 struct iwl_scale_tbl_info *search_tbl =
1293 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1294 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1295 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1296 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1297 u8 start_action;
1298 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1299 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1300 int ret = 0;
1301 u8 update_search_tbl_counter = 0;
1302
1303 tbl->action = IWL_LEGACY_SWITCH_SISO;
1304
1305 start_action = tbl->action;
1306 for (; ;) {
1307 lq_sta->action_counter++;
1308 switch (tbl->action) {
1309 case IWL_LEGACY_SWITCH_ANTENNA1:
1310 case IWL_LEGACY_SWITCH_ANTENNA2:
1311 IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
1312
1313 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1314 tx_chains_num <= 1) ||
1315 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1316 tx_chains_num <= 2))
1317 break;
1318
1319 /* Don't change antenna if success has been great */
1320 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1321 break;
1322
1323 /* Set up search table to try other antenna */
1324 memcpy(search_tbl, tbl, sz);
1325
1326 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1327 &search_tbl->current_rate, search_tbl)) {
1328 update_search_tbl_counter = 1;
1329 iwl4965_rs_set_expected_tpt_table(lq_sta,
1330 search_tbl);
1331 goto out;
1332 }
1333 break;
1334 case IWL_LEGACY_SWITCH_SISO:
1335 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
1336
1337 /* Set up search table to try SISO */
1338 memcpy(search_tbl, tbl, sz);
1339 search_tbl->is_SGI = 0;
1340 ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
1341 search_tbl, index);
1342 if (!ret) {
1343 lq_sta->action_counter = 0;
1344 goto out;
1345 }
1346
1347 break;
1348 case IWL_LEGACY_SWITCH_MIMO2_AB:
1349 case IWL_LEGACY_SWITCH_MIMO2_AC:
1350 case IWL_LEGACY_SWITCH_MIMO2_BC:
1351 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
1352
1353 /* Set up search table to try MIMO */
1354 memcpy(search_tbl, tbl, sz);
1355 search_tbl->is_SGI = 0;
1356
1357 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1358 search_tbl->ant_type = ANT_AB;
1359 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1360 search_tbl->ant_type = ANT_AC;
1361 else
1362 search_tbl->ant_type = ANT_BC;
1363
1364 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1365 search_tbl->ant_type))
1366 break;
1367
1368 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1369 conf, sta,
1370 search_tbl, index);
1371 if (!ret) {
1372 lq_sta->action_counter = 0;
1373 goto out;
1374 }
1375 break;
1376 }
1377 tbl->action++;
1378 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1379 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1380
1381 if (tbl->action == start_action)
1382 break;
1383
1384 }
1385 search_tbl->lq_type = LQ_NONE;
1386 return 0;
1387
1388out:
1389 lq_sta->search_better_tbl = 1;
1390 tbl->action++;
1391 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1392 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1393 if (update_search_tbl_counter)
1394 search_tbl->action = tbl->action;
1395 return 0;
1396
1397}
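/*
 * Illustrative sketch (not part of the driver): the three "move" routines
 * here share one round-robin pattern -- try the current action, advance,
 * wrap past the last action, and give up once the loop comes back to the
 * action it started from.  Hypothetical, condensed form:
 */
static int ex_try_actions(int first_action, int last_action, int start,
			  int (*try_one)(int action, void *ctx), void *ctx)
{
	int action = start;

	do {
		if (try_one(action, ctx))
			return action;		/* a switch candidate was set up */
		if (++action > last_action)
			action = first_action;	/* wrap around */
	} while (action != start);

	return -1;				/* nothing worth switching to */
}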
1398
1399/*
1400 * Try to switch to new modulation mode from SISO
1401 */
1402static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
1403 struct iwl_lq_sta *lq_sta,
1404 struct ieee80211_conf *conf,
1405 struct ieee80211_sta *sta, int index)
1406{
1407 u8 is_green = lq_sta->is_green;
1408 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1409 struct iwl_scale_tbl_info *search_tbl =
1410 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1411 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1412 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1413 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1414 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1415 u8 start_action;
1416 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1417 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1418 u8 update_search_tbl_counter = 0;
1419 int ret;
1420
1421 start_action = tbl->action;
1422
1423 for (;;) {
1424 lq_sta->action_counter++;
1425 switch (tbl->action) {
1426 case IWL_SISO_SWITCH_ANTENNA1:
1427 case IWL_SISO_SWITCH_ANTENNA2:
1428 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1429 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1430 tx_chains_num <= 1) ||
1431 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1432 tx_chains_num <= 2))
1433 break;
1434
1435 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1436 break;
1437
1438 memcpy(search_tbl, tbl, sz);
1439 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1440 &search_tbl->current_rate, search_tbl)) {
1441 update_search_tbl_counter = 1;
1442 goto out;
1443 }
1444 break;
1445 case IWL_SISO_SWITCH_MIMO2_AB:
1446 case IWL_SISO_SWITCH_MIMO2_AC:
1447 case IWL_SISO_SWITCH_MIMO2_BC:
1448 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
1449 memcpy(search_tbl, tbl, sz);
1450 search_tbl->is_SGI = 0;
1451
1452 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1453 search_tbl->ant_type = ANT_AB;
1454 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1455 search_tbl->ant_type = ANT_AC;
1456 else
1457 search_tbl->ant_type = ANT_BC;
1458
1459 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1460 search_tbl->ant_type))
1461 break;
1462
1463 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1464 conf, sta,
1465 search_tbl, index);
1466 if (!ret)
1467 goto out;
1468 break;
1469 case IWL_SISO_SWITCH_GI:
1470 if (!tbl->is_ht40 && !(ht_cap->cap &
1471 IEEE80211_HT_CAP_SGI_20))
1472 break;
1473 if (tbl->is_ht40 && !(ht_cap->cap &
1474 IEEE80211_HT_CAP_SGI_40))
1475 break;
1476
1477 IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
1478
1479 memcpy(search_tbl, tbl, sz);
1480 if (is_green) {
1481 if (!tbl->is_SGI)
1482 break;
1483 else
1484 IWL_ERR(priv,
1485 "SGI was set in GF+SISO\n");
1486 }
1487 search_tbl->is_SGI = !tbl->is_SGI;
1488 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1489 if (tbl->is_SGI) {
1490 s32 tpt = lq_sta->last_tpt / 100;
1491 if (tpt >= search_tbl->expected_tpt[index])
1492 break;
1493 }
1494 search_tbl->current_rate =
1495 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1496 index, is_green);
1497 update_search_tbl_counter = 1;
1498 goto out;
1499 }
1500 tbl->action++;
1501 if (tbl->action > IWL_SISO_SWITCH_GI)
1502 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1503
1504 if (tbl->action == start_action)
1505 break;
1506 }
1507 search_tbl->lq_type = LQ_NONE;
1508 return 0;
1509
1510 out:
1511 lq_sta->search_better_tbl = 1;
1512 tbl->action++;
1513 if (tbl->action > IWL_SISO_SWITCH_GI)
1514 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1515 if (update_search_tbl_counter)
1516 search_tbl->action = tbl->action;
1517
1518 return 0;
1519}
1520
1521/*
1522 * Try to switch to new modulation mode from MIMO2
1523 */
1524static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
1525 struct iwl_lq_sta *lq_sta,
1526 struct ieee80211_conf *conf,
1527 struct ieee80211_sta *sta, int index)
1528{
1529 s8 is_green = lq_sta->is_green;
1530 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1531 struct iwl_scale_tbl_info *search_tbl =
1532 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1533 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1534 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1535 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1536 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1537 u8 start_action;
1538 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1539 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1540 u8 update_search_tbl_counter = 0;
1541 int ret;
1542
1543 start_action = tbl->action;
1544 for (;;) {
1545 lq_sta->action_counter++;
1546 switch (tbl->action) {
1547 case IWL_MIMO2_SWITCH_ANTENNA1:
1548 case IWL_MIMO2_SWITCH_ANTENNA2:
1549 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
1550
1551 if (tx_chains_num <= 2)
1552 break;
1553
1554 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1555 break;
1556
1557 memcpy(search_tbl, tbl, sz);
1558 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1559 &search_tbl->current_rate, search_tbl)) {
1560 update_search_tbl_counter = 1;
1561 goto out;
1562 }
1563 break;
1564 case IWL_MIMO2_SWITCH_SISO_A:
1565 case IWL_MIMO2_SWITCH_SISO_B:
1566 case IWL_MIMO2_SWITCH_SISO_C:
1567 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
1568
1569 /* Set up new search table for SISO */
1570 memcpy(search_tbl, tbl, sz);
1571
1572 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1573 search_tbl->ant_type = ANT_A;
1574 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1575 search_tbl->ant_type = ANT_B;
1576 else
1577 search_tbl->ant_type = ANT_C;
1578
1579 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1580 search_tbl->ant_type))
1581 break;
1582
1583 ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
1584 conf, sta,
1585 search_tbl, index);
1586 if (!ret)
1587 goto out;
1588
1589 break;
1590
1591 case IWL_MIMO2_SWITCH_GI:
1592 if (!tbl->is_ht40 && !(ht_cap->cap &
1593 IEEE80211_HT_CAP_SGI_20))
1594 break;
1595 if (tbl->is_ht40 && !(ht_cap->cap &
1596 IEEE80211_HT_CAP_SGI_40))
1597 break;
1598
1599 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
1600
1601 /* Set up new search table for MIMO2 */
1602 memcpy(search_tbl, tbl, sz);
1603 search_tbl->is_SGI = !tbl->is_SGI;
1604 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1605 /*
1606 * If active table already uses the fastest possible
1607 * modulation (dual stream with short guard interval),
1608 * and it's working well, there's no need to look
1609 * for a better type of modulation!
1610 */
1611 if (tbl->is_SGI) {
1612 s32 tpt = lq_sta->last_tpt / 100;
1613 if (tpt >= search_tbl->expected_tpt[index])
1614 break;
1615 }
1616 search_tbl->current_rate =
1617 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1618 index, is_green);
1619 update_search_tbl_counter = 1;
1620 goto out;
1621
1622 }
1623 tbl->action++;
1624 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1625 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1626
1627 if (tbl->action == start_action)
1628 break;
1629 }
1630 search_tbl->lq_type = LQ_NONE;
1631 return 0;
1632 out:
1633 lq_sta->search_better_tbl = 1;
1634 tbl->action++;
1635 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1636 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1637 if (update_search_tbl_counter)
1638 search_tbl->action = tbl->action;
1639
1640 return 0;
1641
1642}
1643
1644/*
1645 * Check whether we should continue using same modulation mode, or
1646 * begin search for a new mode, based on:
1647 * 1) # tx successes or failures while using this mode
1648 * 2) # times calling this function
1649 * 3) elapsed time in this mode (not used, for now)
1650 */
1651static void
1652iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1653{
1654 struct iwl_scale_tbl_info *tbl;
1655 int i;
1656 int active_tbl;
1657 int flush_interval_passed = 0;
1658 struct iwl_priv *priv;
1659
1660 priv = lq_sta->drv;
1661 active_tbl = lq_sta->active_tbl;
1662
1663 tbl = &(lq_sta->lq_info[active_tbl]);
1664
1665 /* If we've been disallowing search, see if we should now allow it */
1666 if (lq_sta->stay_in_tbl) {
1667
1668 /* Elapsed time using current modulation mode */
1669 if (lq_sta->flush_timer)
1670 flush_interval_passed =
1671 time_after(jiffies,
1672 (unsigned long)(lq_sta->flush_timer +
1673 IWL_RATE_SCALE_FLUSH_INTVL));
1674
1675 /*
1676 * Check if we should allow search for new modulation mode.
1677 * If many frames have failed or succeeded, or we've used
1678 * this same modulation for a long time, allow search, and
1679 * reset history stats that keep track of whether we should
1680 * allow a new search. Also (below) reset all bitmaps and
1681 * stats in active history.
1682 */
1683 if (force_search ||
1684 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1685 (lq_sta->total_success > lq_sta->max_success_limit) ||
1686 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1687 && (flush_interval_passed))) {
1688 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
1689 lq_sta->total_failed,
1690 lq_sta->total_success,
1691 flush_interval_passed);
1692
1693 /* Allow search for new mode */
1694 lq_sta->stay_in_tbl = 0; /* only place reset */
1695 lq_sta->total_failed = 0;
1696 lq_sta->total_success = 0;
1697 lq_sta->flush_timer = 0;
1698
1699 /*
1700 * Else if we've used this modulation mode enough repetitions
1701 * (regardless of elapsed time or success/failure), reset
1702 * history bitmaps and rate-specific stats for all rates in
1703 * active table.
1704 */
1705 } else {
1706 lq_sta->table_count++;
1707 if (lq_sta->table_count >=
1708 lq_sta->table_count_limit) {
1709 lq_sta->table_count = 0;
1710
1711 IWL_DEBUG_RATE(priv,
1712 "LQ: stay in table clear win\n");
1713 for (i = 0; i < IWL_RATE_COUNT; i++)
1714 iwl4965_rs_rate_scale_clear_window(
1715 &(tbl->win[i]));
1716 }
1717 }
1718
1719 /* If transitioning to allow "search", reset all history
1720 * bitmaps and stats in active table (this will become the new
1721 * "search" table). */
1722 if (!lq_sta->stay_in_tbl) {
1723 for (i = 0; i < IWL_RATE_COUNT; i++)
1724 iwl4965_rs_rate_scale_clear_window(
1725 &(tbl->win[i]));
1726 }
1727 }
1728}
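/*
 * Illustrative sketch (not part of the driver): the decision above boils
 * down to "allow a new modulation search once enough frames have failed,
 * enough have succeeded, or the current mode has simply been in use for
 * longer than the flush interval".  A condensed predicate with
 * hypothetical names (the real code also requires that no search table
 * is pending before honouring the flush interval):
 */
static int ex_search_allowed(int total_failed, int max_failed,
			     int total_success, int max_success,
			     int flush_interval_passed, int force)
{
	return force ||
	       total_failed > max_failed ||
	       total_success > max_success ||
	       flush_interval_passed;
}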
1729
1730/*
1731 * set up rate table in uCode
1732 * return rate_n_flags as used in the table
1733 */
1734static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
1735 struct iwl_rxon_context *ctx,
1736 struct iwl_lq_sta *lq_sta,
1737 struct iwl_scale_tbl_info *tbl,
1738 int index, u8 is_green)
1739{
1740 u32 rate;
1741
1742 /* Update uCode's rate table. */
1743 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
1744 iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
1745 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1746
1747 return rate;
1748}
1749
1750/*
1751 * Do rate scaling and search for new modulation mode.
1752 */
1753static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
1754 struct sk_buff *skb,
1755 struct ieee80211_sta *sta,
1756 struct iwl_lq_sta *lq_sta)
1757{
1758 struct ieee80211_hw *hw = priv->hw;
1759 struct ieee80211_conf *conf = &hw->conf;
1760 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1761 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1762 int low = IWL_RATE_INVALID;
1763 int high = IWL_RATE_INVALID;
1764 int index;
1765 int i;
1766 struct iwl_rate_scale_data *window = NULL;
1767 int current_tpt = IWL_INVALID_VALUE;
1768 int low_tpt = IWL_INVALID_VALUE;
1769 int high_tpt = IWL_INVALID_VALUE;
1770 u32 fail_count;
1771 s8 scale_action = 0;
1772 u16 rate_mask;
1773 u8 update_lq = 0;
1774 struct iwl_scale_tbl_info *tbl, *tbl1;
1775 u16 rate_scale_index_msk = 0;
1776 u32 rate;
1777 u8 is_green = 0;
1778 u8 active_tbl = 0;
1779 u8 done_search = 0;
1780 u16 high_low;
1781 s32 sr;
1782 u8 tid = MAX_TID_COUNT;
1783 struct iwl_tid_data *tid_data;
1784 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1785 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1786
1787 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1788
1789 /* Send management frames and NO_ACK data using lowest rate. */
1790 /* TODO: this could probably be improved.. */
1791 if (!ieee80211_is_data(hdr->frame_control) ||
1792 info->flags & IEEE80211_TX_CTL_NO_ACK)
1793 return;
1794
1795 if (!sta || !lq_sta)
1796 return;
1797
1798 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1799
1800 tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
1801 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1802 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
1803 if (tid_data->agg.state == IWL_AGG_OFF)
1804 lq_sta->is_agg = 0;
1805 else
1806 lq_sta->is_agg = 1;
1807 } else
1808 lq_sta->is_agg = 0;
1809
1810 /*
1811 * Select rate-scale / modulation-mode table to work with in
1812 * the rest of this function: "search" if searching for better
1813 * modulation mode, or "active" if doing rate scaling within a mode.
1814 */
1815 if (!lq_sta->search_better_tbl)
1816 active_tbl = lq_sta->active_tbl;
1817 else
1818 active_tbl = 1 - lq_sta->active_tbl;
1819
1820 tbl = &(lq_sta->lq_info[active_tbl]);
1821 if (is_legacy(tbl->lq_type))
1822 lq_sta->is_green = 0;
1823 else
1824 lq_sta->is_green = iwl4965_rs_use_green(sta);
1825 is_green = lq_sta->is_green;
1826
1827 /* current tx rate */
1828 index = lq_sta->last_txrate_idx;
1829
1830 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
1831 tbl->lq_type);
1832
1833 /* rates available for this association, and for modulation mode */
1834 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1835
1836 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
1837
1838 /* mask with station rate restriction */
1839 if (is_legacy(tbl->lq_type)) {
1840 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1841 /* supp_rates has no CCK bits in A mode */
1842 rate_scale_index_msk = (u16) (rate_mask &
1843 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
1844 else
1845 rate_scale_index_msk = (u16) (rate_mask &
1846 lq_sta->supp_rates);
1847
1848 } else
1849 rate_scale_index_msk = rate_mask;
1850
1851 if (!rate_scale_index_msk)
1852 rate_scale_index_msk = rate_mask;
1853
1854 if (!((1 << index) & rate_scale_index_msk)) {
1855 IWL_ERR(priv, "Current Rate is not valid\n");
1856 if (lq_sta->search_better_tbl) {
1857 /* revert to active table if search table is not valid*/
1858 tbl->lq_type = LQ_NONE;
1859 lq_sta->search_better_tbl = 0;
1860 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1861 /* get "active" rate info */
1862 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1863 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
1864 tbl, index, is_green);
1865 }
1866 return;
1867 }
1868
1869 /* Get expected throughput table and history window for current rate */
1870 if (!tbl->expected_tpt) {
1871 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
1872 return;
1873 }
1874
1875 /* force user max rate if set by user */
1876 if ((lq_sta->max_rate_idx != -1) &&
1877 (lq_sta->max_rate_idx < index)) {
1878 index = lq_sta->max_rate_idx;
1879 update_lq = 1;
1880 window = &(tbl->win[index]);
1881 goto lq_update;
1882 }
1883
1884 window = &(tbl->win[index]);
1885
1886 /*
1887 * If there is not enough history to calculate actual average
1888 * throughput, keep analyzing results of more tx frames, without
1889 * changing rate or mode (bypass most of the rest of this function).
1890 * Set up new rate table in uCode only if old rate is not supported
1891 * in current association (use new rate found above).
1892 */
1893 fail_count = window->counter - window->success_counter;
1894 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1895 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1896 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
1897 "for index %d\n",
1898 window->success_counter, window->counter, index);
1899
1900 /* Can't calculate this yet; not enough history */
1901 window->average_tpt = IWL_INVALID_VALUE;
1902
1903 /* Should we stay with this modulation mode,
1904 * or search for a new one? */
1905 iwl4965_rs_stay_in_table(lq_sta, false);
1906
1907 goto out;
1908 }
1909 /* Else we have enough samples; calculate estimate of
1910 * actual average throughput */
1911 if (window->average_tpt != ((window->success_ratio *
1912 tbl->expected_tpt[index] + 64) / 128)) {
1913 IWL_ERR(priv,
1914 "expected_tpt should have been calculated by now\n");
1915 window->average_tpt = ((window->success_ratio *
1916 tbl->expected_tpt[index] + 64) / 128);
1917 }
1918
1919 /* If we are searching for better modulation mode, check success. */
1920 if (lq_sta->search_better_tbl) {
1921 /* If good success, continue using the "search" mode;
1922 * no need to send new link quality command, since we're
1923 * continuing to use the setup that we've been trying. */
1924 if (window->average_tpt > lq_sta->last_tpt) {
1925
1926 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
1927 "suc=%d cur-tpt=%d old-tpt=%d\n",
1928 window->success_ratio,
1929 window->average_tpt,
1930 lq_sta->last_tpt);
1931
1932 if (!is_legacy(tbl->lq_type))
1933 lq_sta->enable_counter = 1;
1934
1935 /* Swap tables; "search" becomes "active" */
1936 lq_sta->active_tbl = active_tbl;
1937 current_tpt = window->average_tpt;
1938
1939 /* Else poor success; go back to mode in "active" table */
1940 } else {
1941
1942 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
1943 "suc=%d cur-tpt=%d old-tpt=%d\n",
1944 window->success_ratio,
1945 window->average_tpt,
1946 lq_sta->last_tpt);
1947
1948 /* Nullify "search" table */
1949 tbl->lq_type = LQ_NONE;
1950
1951 /* Revert to "active" table */
1952 active_tbl = lq_sta->active_tbl;
1953 tbl = &(lq_sta->lq_info[active_tbl]);
1954
1955 /* Revert to "active" rate and throughput info */
1956 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1957 current_tpt = lq_sta->last_tpt;
1958
1959 /* Need to set up a new rate table in uCode */
1960 update_lq = 1;
1961 }
1962
1963 /* Either way, we've made a decision; modulation mode
1964 * search is done, allow rate adjustment next time. */
1965 lq_sta->search_better_tbl = 0;
1966 done_search = 1; /* Don't switch modes below! */
1967 goto lq_update;
1968 }
1969
1970 /* (Else) not in search of better modulation mode, try for better
1971 * starting rate, while staying in this mode. */
1972 high_low = iwl4965_rs_get_adjacent_rate(priv, index,
1973 rate_scale_index_msk,
1974 tbl->lq_type);
1975 low = high_low & 0xff;
1976 high = (high_low >> 8) & 0xff;
1977
978 /* If user set a max rate, don't allow rates higher than that constraint */
1979 if ((lq_sta->max_rate_idx != -1) &&
1980 (lq_sta->max_rate_idx < high))
1981 high = IWL_RATE_INVALID;
1982
1983 sr = window->success_ratio;
1984
1985 /* Collect measured throughputs for current and adjacent rates */
1986 current_tpt = window->average_tpt;
1987 if (low != IWL_RATE_INVALID)
1988 low_tpt = tbl->win[low].average_tpt;
1989 if (high != IWL_RATE_INVALID)
1990 high_tpt = tbl->win[high].average_tpt;
1991
1992 scale_action = 0;
1993
1994 /* Too many failures, decrease rate */
1995 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1996 IWL_DEBUG_RATE(priv,
1997 "decrease rate because of low success_ratio\n");
1998 scale_action = -1;
1999
2000 /* No throughput measured yet for adjacent rates; try increase. */
2001 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2002 (high_tpt == IWL_INVALID_VALUE)) {
2003
2004 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2005 scale_action = 1;
2006 else if (low != IWL_RATE_INVALID)
2007 scale_action = 0;
2008 }
2009
2010 /* Both adjacent throughputs are measured, but neither one has better
2011 * throughput; we're using the best rate, don't change it! */
2012 else if ((low_tpt != IWL_INVALID_VALUE) &&
2013 (high_tpt != IWL_INVALID_VALUE) &&
2014 (low_tpt < current_tpt) &&
2015 (high_tpt < current_tpt))
2016 scale_action = 0;
2017
2018 /* At least one adjacent rate's throughput is measured,
2019 * and may have better performance. */
2020 else {
2021 /* Higher adjacent rate's throughput is measured */
2022 if (high_tpt != IWL_INVALID_VALUE) {
2023 /* Higher rate has better throughput */
2024 if (high_tpt > current_tpt &&
2025 sr >= IWL_RATE_INCREASE_TH) {
2026 scale_action = 1;
2027 } else {
2028 scale_action = 0;
2029 }
2030
2031 /* Lower adjacent rate's throughput is measured */
2032 } else if (low_tpt != IWL_INVALID_VALUE) {
2033 /* Lower rate has better throughput */
2034 if (low_tpt > current_tpt) {
2035 IWL_DEBUG_RATE(priv,
2036 "decrease rate because of low tpt\n");
2037 scale_action = -1;
2038 } else if (sr >= IWL_RATE_INCREASE_TH) {
2039 scale_action = 1;
2040 }
2041 }
2042 }
2043
2044 /* Sanity check; asked for decrease, but success rate or throughput
2045 * has been good at old rate. Don't change it. */
2046 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2047 ((sr > IWL_RATE_HIGH_TH) ||
2048 (current_tpt > (100 * tbl->expected_tpt[low]))))
2049 scale_action = 0;
2050
2051 switch (scale_action) {
2052 case -1:
2053 /* Decrease starting rate, update uCode's rate table */
2054 if (low != IWL_RATE_INVALID) {
2055 update_lq = 1;
2056 index = low;
2057 }
2058
2059 break;
2060 case 1:
2061 /* Increase starting rate, update uCode's rate table */
2062 if (high != IWL_RATE_INVALID) {
2063 update_lq = 1;
2064 index = high;
2065 }
2066
2067 break;
2068 case 0:
2069 /* No change */
2070 default:
2071 break;
2072 }
2073
2074 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2075 "high %d type %d\n",
2076 index, scale_action, low, high, tbl->lq_type);
2077
2078lq_update:
2079 /* Replace uCode's rate table for the destination station. */
2080 if (update_lq)
2081 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
2082 tbl, index, is_green);
2083
2084 /* Should we stay with this modulation mode,
2085 * or search for a new one? */
2086 iwl4965_rs_stay_in_table(lq_sta, false);
2087
2088 /*
2089 * Search for new modulation mode if we're:
2090 * 1) Not changing rates right now
2091 * 2) Not just finishing up a search
2092 * 3) Allowing a new search
2093 */
2094 if (!update_lq && !done_search &&
2095 !lq_sta->stay_in_tbl && window->counter) {
2096 /* Save current throughput to compare with "search" throughput*/
2097 lq_sta->last_tpt = current_tpt;
2098
2099 /* Select a new "search" modulation mode to try.
2100 * If one is found, set up the new "search" table. */
2101 if (is_legacy(tbl->lq_type))
2102 iwl4965_rs_move_legacy_other(priv, lq_sta,
2103 conf, sta, index);
2104 else if (is_siso(tbl->lq_type))
2105 iwl4965_rs_move_siso_to_other(priv, lq_sta,
2106 conf, sta, index);
2107 else /* (is_mimo2(tbl->lq_type)) */
2108 iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
2109 conf, sta, index);
2110
2111 /* If new "search" mode was selected, set up in uCode table */
2112 if (lq_sta->search_better_tbl) {
2113 /* Access the "search" table, clear its history. */
2114 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2115 for (i = 0; i < IWL_RATE_COUNT; i++)
2116 iwl4965_rs_rate_scale_clear_window(
2117 &(tbl->win[i]));
2118
2119 /* Use new "search" start rate */
2120 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
2121
2122 IWL_DEBUG_RATE(priv,
2123 "Switch current mcs: %X index: %d\n",
2124 tbl->current_rate, index);
2125 iwl4965_rs_fill_link_cmd(priv, lq_sta,
2126 tbl->current_rate);
2127 iwl_legacy_send_lq_cmd(priv, ctx,
2128 &lq_sta->lq, CMD_ASYNC, false);
2129 } else
2130 done_search = 1;
2131 }
2132
2133 if (done_search && !lq_sta->stay_in_tbl) {
2134 /* If the "active" (non-search) mode was legacy,
2135 * and we've tried switching antennas,
2136 * but we haven't been able to try HT modes (not available),
2137 * stay with best antenna legacy modulation for a while
2138 * before next round of mode comparisons. */
2139 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2140 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2141 lq_sta->action_counter > tbl1->max_search) {
2142 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2143 iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
2144 }
2145
2146 /* If we're in an HT mode, and all 3 mode switch actions
2147 * have been tried and compared, stay in this best modulation
2148 * mode for a while before next round of mode comparisons. */
2149 if (lq_sta->enable_counter &&
2150 (lq_sta->action_counter >= tbl1->max_search)) {
2151 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2152 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2153 (tid != MAX_TID_COUNT)) {
2154 tid_data =
2155 &priv->stations[lq_sta->lq.sta_id].tid[tid];
2156 if (tid_data->agg.state == IWL_AGG_OFF) {
2157 IWL_DEBUG_RATE(priv,
2158 "try to aggregate tid %d\n",
2159 tid);
2160 iwl4965_rs_tl_turn_on_agg(priv, tid,
2161 lq_sta, sta);
2162 }
2163 }
2164 iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
2165 }
2166 }
2167
2168out:
2169 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
2170 index, is_green);
2171 i = index;
2172 lq_sta->last_txrate_idx = i;
2173}
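/*
 * Illustrative sketch (not part of the driver): two pieces of the logic
 * above, shown in isolation.  First, a window's average throughput is
 * the expected (ideal) throughput scaled by the success ratio, which is
 * kept in units of 1/128; the +64 rounds to nearest.  Second, the scale
 * decision compares the current window against its neighbours.  All
 * names are hypothetical and the IWL_INVALID_VALUE sentinel is modelled
 * as -1; the branch structure is slightly condensed.
 */
static int ex_average_tpt(int success_ratio_128ths, int expected_tpt)
{
	return (success_ratio_128ths * expected_tpt + 64) / 128;
}

/* Returns -1 to step the rate down, +1 to step up, 0 to stay put. */
static int ex_scale_action(int sr, int decrease_th, int increase_th,
			   int current_tpt, int low_tpt, int high_tpt)
{
	if (sr <= decrease_th || current_tpt == 0)
		return -1;				/* too many failures */
	if (low_tpt == -1 && high_tpt == -1)		/* nothing measured yet */
		return sr >= increase_th ? 1 : 0;
	if (low_tpt != -1 && high_tpt != -1 &&
	    low_tpt < current_tpt && high_tpt < current_tpt)
		return 0;				/* already at the best rate */
	if (high_tpt != -1)
		return (high_tpt > current_tpt && sr >= increase_th) ? 1 : 0;
	if (low_tpt > current_tpt)
		return -1;				/* lower rate actually does better */
	return sr >= increase_th ? 1 : 0;
}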
2174
2175/**
2176 * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
2177 *
2178 * The uCode's station table contains a table of fallback rates
2179 * for automatic fallback during transmission.
2180 *
2181 * NOTE: This sets up a default set of values. These will be replaced later
2182 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2183 * rc80211_simple.
2184 *
2185 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2186 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2187 * which requires station table entry to exist).
2188 */
2189static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
2190 struct ieee80211_conf *conf,
2191 struct ieee80211_sta *sta,
2192 struct iwl_lq_sta *lq_sta)
2193{
2194 struct iwl_scale_tbl_info *tbl;
2195 int rate_idx;
2196 int i;
2197 u32 rate;
2198 u8 use_green = iwl4965_rs_use_green(sta);
2199 u8 active_tbl = 0;
2200 u8 valid_tx_ant;
2201 struct iwl_station_priv *sta_priv;
2202 struct iwl_rxon_context *ctx;
2203
2204 if (!sta || !lq_sta)
2205 return;
2206
2207 sta_priv = (void *)sta->drv_priv;
2208 ctx = sta_priv->common.ctx;
2209
2210 i = lq_sta->last_txrate_idx;
2211
2212 valid_tx_ant = priv->hw_params.valid_tx_ant;
2213
2214 if (!lq_sta->search_better_tbl)
2215 active_tbl = lq_sta->active_tbl;
2216 else
2217 active_tbl = 1 - lq_sta->active_tbl;
2218
2219 tbl = &(lq_sta->lq_info[active_tbl]);
2220
2221 if ((i < 0) || (i >= IWL_RATE_COUNT))
2222 i = 0;
2223
2224 rate = iwl_rates[i].plcp;
2225 tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
2226 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2227
2228 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2229 rate |= RATE_MCS_CCK_MSK;
2230
2231 iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2232 if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2233 iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2234
2235 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2236 tbl->current_rate = rate;
2237 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
2238 iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
2239 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2240 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2241}
2242
2243static void
2244iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2245 struct ieee80211_tx_rate_control *txrc)
2246{
2247
2248 struct sk_buff *skb = txrc->skb;
2249 struct ieee80211_supported_band *sband = txrc->sband;
2250 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
2251 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2252 struct iwl_lq_sta *lq_sta = priv_sta;
2253 int rate_idx;
2254
2255 IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
2256
2257 /* Get max rate if user set max rate */
2258 if (lq_sta) {
2259 lq_sta->max_rate_idx = txrc->max_rate_idx;
2260 if ((sband->band == IEEE80211_BAND_5GHZ) &&
2261 (lq_sta->max_rate_idx != -1))
2262 lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
2263 if ((lq_sta->max_rate_idx < 0) ||
2264 (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
2265 lq_sta->max_rate_idx = -1;
2266 }
2267
2268 /* Treat uninitialized rate scaling data same as non-existing. */
2269 if (lq_sta && !lq_sta->drv) {
2270 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
2271 priv_sta = NULL;
2272 }
2273
2274 /* Send management frames and NO_ACK data using lowest rate. */
2275 if (rate_control_send_low(sta, priv_sta, txrc))
2276 return;
2277
2278 rate_idx = lq_sta->last_txrate_idx;
2279
2280 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2281 rate_idx -= IWL_FIRST_OFDM_RATE;
2282 /* 6M and 9M share the same MCS index */
2283 rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
2284 if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
2285 IWL_RATE_MIMO2_6M_PLCP)
2286 rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
2287 info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
2288 if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
2289 info->control.rates[0].flags |=
2290 IEEE80211_TX_RC_SHORT_GI;
2291 if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
2292 info->control.rates[0].flags |=
2293 IEEE80211_TX_RC_DUP_DATA;
2294 if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
2295 info->control.rates[0].flags |=
2296 IEEE80211_TX_RC_40_MHZ_WIDTH;
2297 if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
2298 info->control.rates[0].flags |=
2299 IEEE80211_TX_RC_GREEN_FIELD;
2300 } else {
2301 /* Check for invalid rates */
2302 if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
2303 ((sband->band == IEEE80211_BAND_5GHZ) &&
2304 (rate_idx < IWL_FIRST_OFDM_RATE)))
2305 rate_idx = rate_lowest_index(sband, sta);
2306 /* On valid 5 GHz rate, adjust index */
2307 else if (sband->band == IEEE80211_BAND_5GHZ)
2308 rate_idx -= IWL_FIRST_OFDM_RATE;
2309 info->control.rates[0].flags = 0;
2310 }
2311 info->control.rates[0].idx = rate_idx;
2312
2313}
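/*
 * Illustrative sketch (not part of the driver): for HT the driver's rate
 * table still contains the legacy-only 9 Mbps slot, while mac80211 wants
 * a plain MCS index, so the conversion above drops the CCK offset,
 * collapses the 6/9 Mbps pair onto MCS 0, and adds one stream's worth of
 * MCS indexes for MIMO2 rates.  The constants are hypothetical stand-ins
 * for IWL_FIRST_OFDM_RATE and MCS_INDEX_PER_STREAM.
 */
enum { EX_FIRST_OFDM_RATE_IDX = 4, EX_MCS_PER_STREAM = 8 };

static int ex_rate_idx_to_mcs(int rate_idx, int is_mimo2)
{
	int mcs = rate_idx - EX_FIRST_OFDM_RATE_IDX;

	mcs = (mcs > 0) ? mcs - 1 : 0;		/* 6M and 9M share MCS 0 */
	if (is_mimo2)
		mcs += EX_MCS_PER_STREAM;	/* second spatial stream block */
	return mcs;
}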
2314
2315static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2316 gfp_t gfp)
2317{
2318 struct iwl_lq_sta *lq_sta;
2319 struct iwl_station_priv *sta_priv =
2320 (struct iwl_station_priv *) sta->drv_priv;
2321 struct iwl_priv *priv;
2322
2323 priv = (struct iwl_priv *)priv_rate;
2324 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2325
2326 lq_sta = &sta_priv->lq_sta;
2327
2328 return lq_sta;
2329}
2330
2331/*
2332 * Called after adding a new station to initialize rate scaling
2333 */
2334void
2335iwl4965_rs_rate_init(struct iwl_priv *priv,
2336 struct ieee80211_sta *sta,
2337 u8 sta_id)
2338{
2339 int i, j;
2340 struct ieee80211_hw *hw = priv->hw;
2341 struct ieee80211_conf *conf = &priv->hw->conf;
2342 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2343 struct iwl_station_priv *sta_priv;
2344 struct iwl_lq_sta *lq_sta;
2345 struct ieee80211_supported_band *sband;
2346
2347 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2348 lq_sta = &sta_priv->lq_sta;
2349 sband = hw->wiphy->bands[conf->channel->band];
2350
2351
2352 lq_sta->lq.sta_id = sta_id;
2353
2354 for (j = 0; j < LQ_SIZE; j++)
2355 for (i = 0; i < IWL_RATE_COUNT; i++)
2356 iwl4965_rs_rate_scale_clear_window(
2357 &lq_sta->lq_info[j].win[i]);
2358
2359 lq_sta->flush_timer = 0;
2360 lq_sta->supp_rates = sta->supp_rates[sband->band];
2361 for (j = 0; j < LQ_SIZE; j++)
2362 for (i = 0; i < IWL_RATE_COUNT; i++)
2363 iwl4965_rs_rate_scale_clear_window(
2364 &lq_sta->lq_info[j].win[i]);
2365
2366 IWL_DEBUG_RATE(priv, "LQ:"
2367 "*** rate scale station global init for station %d ***\n",
2368 sta_id);
2369 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2370 * the lowest or the highest rate.. Could consider using RSSI from
2371 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2372 * after assoc.. */
2373
2374 lq_sta->is_dup = 0;
2375 lq_sta->max_rate_idx = -1;
2376 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2377 lq_sta->is_green = iwl4965_rs_use_green(sta);
2378 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2379 lq_sta->band = priv->band;
2380 /*
2381 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2382 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2383 */
2384 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2385 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2386 lq_sta->active_siso_rate &= ~((u16)0x2);
2387 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2388
2389 /* Same here */
2390 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2391 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2392 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2393 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2394
2395 /* These values will be overridden later */
2396 lq_sta->lq.general_params.single_stream_ant_msk =
2397 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2398 lq_sta->lq.general_params.dual_stream_ant_msk =
2399 priv->hw_params.valid_tx_ant &
2400 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2401 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2402 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2403 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2404 lq_sta->lq.general_params.dual_stream_ant_msk =
2405 priv->hw_params.valid_tx_ant;
2406 }
2407
2408 /* as default allow aggregation for all tids */
2409 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2410 lq_sta->drv = priv;
2411
2412 /* Set last_txrate_idx to lowest rate */
2413 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2414 if (sband->band == IEEE80211_BAND_5GHZ)
2415 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2416 lq_sta->is_agg = 0;
2417
2418#ifdef CONFIG_MAC80211_DEBUGFS
2419 lq_sta->dbg_fixed_rate = 0;
2420#endif
2421
2422 iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
2423}
2424
2425static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
2426 struct iwl_lq_sta *lq_sta, u32 new_rate)
2427{
2428 struct iwl_scale_tbl_info tbl_type;
2429 int index = 0;
2430 int rate_idx;
2431 int repeat_rate = 0;
2432 u8 ant_toggle_cnt = 0;
2433 u8 use_ht_possible = 1;
2434 u8 valid_tx_ant = 0;
2435 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
2436
2437 /* Override starting rate (index 0) if needed for debug purposes */
2438 iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2439
2440 /* Interpret new_rate (rate_n_flags) */
2441 iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2442 &tbl_type, &rate_idx);
2443
2444 /* How many times should we repeat the initial rate? */
2445 if (is_legacy(tbl_type.lq_type)) {
2446 ant_toggle_cnt = 1;
2447 repeat_rate = IWL_NUMBER_TRY;
2448 } else {
2449 repeat_rate = IWL_HT_NUMBER_TRY;
2450 }
2451
2452 lq_cmd->general_params.mimo_delimiter =
2453 is_mimo(tbl_type.lq_type) ? 1 : 0;
2454
2455 /* Fill 1st table entry (index 0) */
2456 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2457
2458 if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
2459 lq_cmd->general_params.single_stream_ant_msk =
2460 tbl_type.ant_type;
2461 } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
2462 lq_cmd->general_params.dual_stream_ant_msk =
2463 tbl_type.ant_type;
2464 } /* otherwise we don't modify the existing value */
2465
2466 index++;
2467 repeat_rate--;
2468 if (priv)
2469 valid_tx_ant = priv->hw_params.valid_tx_ant;
2470
2471 /* Fill rest of rate table */
2472 while (index < LINK_QUAL_MAX_RETRY_NUM) {
2473 /* Repeat initial/next rate.
2474 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
2475 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
2476 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
2477 if (is_legacy(tbl_type.lq_type)) {
2478 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2479 ant_toggle_cnt++;
2480 else if (priv &&
2481 iwl4965_rs_toggle_antenna(valid_tx_ant,
2482 &new_rate, &tbl_type))
2483 ant_toggle_cnt = 1;
2484 }
2485
2486 /* Override next rate if needed for debug purposes */
2487 iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2488
2489 /* Fill next table entry */
2490 lq_cmd->rs_table[index].rate_n_flags =
2491 cpu_to_le32(new_rate);
2492 repeat_rate--;
2493 index++;
2494 }
2495
2496 iwl4965_rs_get_tbl_info_from_mcs(new_rate,
2497 lq_sta->band, &tbl_type,
2498 &rate_idx);
2499
2500 /* Indicate to uCode which entries might be MIMO.
2501 * If initial rate was MIMO, this will finally end up
2502 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
2503 if (is_mimo(tbl_type.lq_type))
2504 lq_cmd->general_params.mimo_delimiter = index;
2505
2506 /* Get next rate */
2507 new_rate = iwl4965_rs_get_lower_rate(lq_sta,
2508 &tbl_type, rate_idx,
2509 use_ht_possible);
2510
2511 /* How many times should we repeat the next rate? */
2512 if (is_legacy(tbl_type.lq_type)) {
2513 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2514 ant_toggle_cnt++;
2515 else if (priv &&
2516 iwl4965_rs_toggle_antenna(valid_tx_ant,
2517 &new_rate, &tbl_type))
2518 ant_toggle_cnt = 1;
2519
2520 repeat_rate = IWL_NUMBER_TRY;
2521 } else {
2522 repeat_rate = IWL_HT_NUMBER_TRY;
2523 }
2524
2525 /* Don't allow HT rates after next pass.
2526 * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
2527 use_ht_possible = 0;
2528
2529 /* Override next rate if needed for debug purposes */
2530 iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2531
2532 /* Fill next table entry */
2533 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2534
2535 index++;
2536 repeat_rate--;
2537 }
2538
2539 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2540 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2541
2542 lq_cmd->agg_params.agg_time_limit =
2543 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2544}
2545
2546static void
2547*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2548{
2549 return hw->priv;
2550}
2551/* The rate scaling framework requires a free function to be implemented */
2552static void iwl4965_rs_free(void *priv_rate)
2553{
2554 return;
2555}
2556
2557static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2558 void *priv_sta)
2559{
2560 struct iwl_priv *priv __maybe_unused = priv_r;
2561
2562 IWL_DEBUG_RATE(priv, "enter\n");
2563 IWL_DEBUG_RATE(priv, "leave\n");
2564}
2565
2566
2567#ifdef CONFIG_MAC80211_DEBUGFS
2568static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
2569{
2570 file->private_data = inode->i_private;
2571 return 0;
2572}
2573static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2574 u32 *rate_n_flags, int index)
2575{
2576 struct iwl_priv *priv;
2577 u8 valid_tx_ant;
2578 u8 ant_sel_tx;
2579
2580 priv = lq_sta->drv;
2581 valid_tx_ant = priv->hw_params.valid_tx_ant;
2582 if (lq_sta->dbg_fixed_rate) {
2583 ant_sel_tx =
2584 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
2585 >> RATE_MCS_ANT_POS);
2586 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2587 *rate_n_flags = lq_sta->dbg_fixed_rate;
2588 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
2589 } else {
2590 lq_sta->dbg_fixed_rate = 0;
2591 IWL_ERR(priv,
2592 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
2593 ant_sel_tx, valid_tx_ant);
2594 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2595 }
2596 } else {
2597 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2598 }
2599}
2600
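/*
 * debugfs "rate_scale_table" write handler: writing a rate_n_flags value in
 * hex fixes the TX rate for this station; writing 0 (or anything that does
 * not parse as hex) returns control to automatic rate scaling.
 */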
2601static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
2602 const char __user *user_buf, size_t count, loff_t *ppos)
2603{
2604 struct iwl_lq_sta *lq_sta = file->private_data;
2605 struct iwl_priv *priv;
2606 char buf[64];
2607 int buf_size;
2608 u32 parsed_rate;
2609 struct iwl_station_priv *sta_priv =
2610 container_of(lq_sta, struct iwl_station_priv, lq_sta);
2611 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
2612
2613 priv = lq_sta->drv;
2614 memset(buf, 0, sizeof(buf));
2615 buf_size = min(count, sizeof(buf) - 1);
2616 if (copy_from_user(buf, user_buf, buf_size))
2617 return -EFAULT;
2618
2619 if (sscanf(buf, "%x", &parsed_rate) == 1)
2620 lq_sta->dbg_fixed_rate = parsed_rate;
2621 else
2622 lq_sta->dbg_fixed_rate = 0;
2623
2624 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2625 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2626 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2627
2628 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
2629 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2630
2631 if (lq_sta->dbg_fixed_rate) {
2632 iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2633 iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
2634 false);
2635 }
2636
2637 return count;
2638}
2639
2640static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
2641 char __user *user_buf, size_t count, loff_t *ppos)
2642{
2643 char *buff;
2644 int desc = 0;
2645 int i = 0;
2646 int index = 0;
2647 ssize_t ret;
2648
2649 struct iwl_lq_sta *lq_sta = file->private_data;
2650 struct iwl_priv *priv;
2651 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2652
2653 priv = lq_sta->drv;
2654 buff = kmalloc(1024, GFP_KERNEL);
2655 if (!buff)
2656 return -ENOMEM;
2657
2658 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2659	desc += sprintf(buff+desc, "failed=%d success=%d rate=0x%X\n",
2660 lq_sta->total_failed, lq_sta->total_success,
2661 lq_sta->active_legacy_rate);
2662 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2663 lq_sta->dbg_fixed_rate);
2664 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
2665 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
2666 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
2667 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
2668 desc += sprintf(buff+desc, "lq type %s\n",
2669 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
2670 if (is_Ht(tbl->lq_type)) {
2671 desc += sprintf(buff+desc, " %s",
2672 (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
2673 desc += sprintf(buff+desc, " %s",
2674 (tbl->is_ht40) ? "40MHz" : "20MHz");
2675 desc += sprintf(buff+desc, " %s %s %s\n",
2676 (tbl->is_SGI) ? "SGI" : "",
2677 (lq_sta->is_green) ? "GF enabled" : "",
2678 (lq_sta->is_agg) ? "AGG on" : "");
2679 }
2680 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
2681 lq_sta->last_rate_n_flags);
2682 desc += sprintf(buff+desc, "general:"
2683			"flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
2684 lq_sta->lq.general_params.flags,
2685 lq_sta->lq.general_params.mimo_delimiter,
2686 lq_sta->lq.general_params.single_stream_ant_msk,
2687 lq_sta->lq.general_params.dual_stream_ant_msk);
2688
2689 desc += sprintf(buff+desc, "agg:"
2690 "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
2691 le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
2692 lq_sta->lq.agg_params.agg_dis_start_th,
2693 lq_sta->lq.agg_params.agg_frame_cnt_limit);
2694
2695 desc += sprintf(buff+desc,
2696 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
2697 lq_sta->lq.general_params.start_rate_index[0],
2698 lq_sta->lq.general_params.start_rate_index[1],
2699 lq_sta->lq.general_params.start_rate_index[2],
2700 lq_sta->lq.general_params.start_rate_index[3]);
2701
2702 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2703 index = iwl4965_hwrate_to_plcp_idx(
2704 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
2705 if (is_legacy(tbl->lq_type)) {
2706 desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
2707 i,
2708 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2709 iwl_rate_mcs[index].mbps);
2710 } else {
2711 desc += sprintf(buff+desc,
2712 " rate[%d] 0x%X %smbps (%s)\n",
2713 i,
2714 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2715 iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
2716 }
2717 }
2718
2719 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2720 kfree(buff);
2721 return ret;
2722}
2723
2724static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
2725 .write = iwl4965_rs_sta_dbgfs_scale_table_write,
2726 .read = iwl4965_rs_sta_dbgfs_scale_table_read,
2727 .open = iwl4965_open_file_generic,
2728 .llseek = default_llseek,
2729};
2730static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
2731 char __user *user_buf, size_t count, loff_t *ppos)
2732{
2733 char *buff;
2734 int desc = 0;
2735 int i, j;
2736 ssize_t ret;
2737
2738 struct iwl_lq_sta *lq_sta = file->private_data;
2739
2740 buff = kmalloc(1024, GFP_KERNEL);
2741 if (!buff)
2742 return -ENOMEM;
2743
2744 for (i = 0; i < LQ_SIZE; i++) {
2745 desc += sprintf(buff+desc,
2746 "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
2747 "rate=0x%X\n",
2748 lq_sta->active_tbl == i ? "*" : "x",
2749 lq_sta->lq_info[i].lq_type,
2750 lq_sta->lq_info[i].is_SGI,
2751 lq_sta->lq_info[i].is_ht40,
2752 lq_sta->lq_info[i].is_dup,
2753 lq_sta->is_green,
2754 lq_sta->lq_info[i].current_rate);
2755 for (j = 0; j < IWL_RATE_COUNT; j++) {
2756 desc += sprintf(buff+desc,
2757 "counter=%d success=%d %%=%d\n",
2758 lq_sta->lq_info[i].win[j].counter,
2759 lq_sta->lq_info[i].win[j].success_counter,
2760 lq_sta->lq_info[i].win[j].success_ratio);
2761 }
2762 }
2763 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2764 kfree(buff);
2765 return ret;
2766}
2767
2768static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2769 .read = iwl4965_rs_sta_dbgfs_stats_table_read,
2770 .open = iwl4965_open_file_generic,
2771 .llseek = default_llseek,
2772};
2773
2774static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2775 char __user *user_buf, size_t count, loff_t *ppos)
2776{
2777 char buff[120];
2778 int desc = 0;
2779 ssize_t ret;
2780
2781 struct iwl_lq_sta *lq_sta = file->private_data;
2782 struct iwl_priv *priv;
2783 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2784
2785 priv = lq_sta->drv;
2786
2787 if (is_Ht(tbl->lq_type))
2788 desc += sprintf(buff+desc,
2789 "Bit Rate= %d Mb/s\n",
2790 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2791 else
2792 desc += sprintf(buff+desc,
2793 "Bit Rate= %d Mb/s\n",
2794 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
2795
2796 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2797 return ret;
2798}
2799
2800static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
2801 .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
2802 .open = iwl4965_open_file_generic,
2803 .llseek = default_llseek,
2804};
2805
2806static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
2807 struct dentry *dir)
2808{
2809 struct iwl_lq_sta *lq_sta = priv_sta;
2810 lq_sta->rs_sta_dbgfs_scale_table_file =
2811 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
2812 lq_sta, &rs_sta_dbgfs_scale_table_ops);
2813 lq_sta->rs_sta_dbgfs_stats_table_file =
2814 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
2815 lq_sta, &rs_sta_dbgfs_stats_table_ops);
2816 lq_sta->rs_sta_dbgfs_rate_scale_data_file =
2817 debugfs_create_file("rate_scale_data", S_IRUSR, dir,
2818 lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
2819 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
2820 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
2821 &lq_sta->tx_agg_tid_en);
2822
2823}
2824
2825static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
2826{
2827 struct iwl_lq_sta *lq_sta = priv_sta;
2828 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2829 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2830 debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
2831 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
2832}
2833#endif
2834
2835/*
2836 * Initialization of rate scaling information is done by the driver after
2837 * the station is added. Since mac80211 calls this function before a
2838 * station is added, we ignore it.
2839 */
2840static void
2841iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
2842 struct ieee80211_sta *sta, void *priv_sta)
2843{
2844}
2845static struct rate_control_ops rs_4965_ops = {
2846 .module = NULL,
2847 .name = IWL4965_RS_NAME,
2848 .tx_status = iwl4965_rs_tx_status,
2849 .get_rate = iwl4965_rs_get_rate,
2850 .rate_init = iwl4965_rs_rate_init_stub,
2851 .alloc = iwl4965_rs_alloc,
2852 .free = iwl4965_rs_free,
2853 .alloc_sta = iwl4965_rs_alloc_sta,
2854 .free_sta = iwl4965_rs_free_sta,
2855#ifdef CONFIG_MAC80211_DEBUGFS
2856 .add_sta_debugfs = iwl4965_rs_add_debugfs,
2857 .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
2858#endif
2859};
2860
2861int iwl4965_rate_control_register(void)
2862{
2863	pr_info("Registering 4965 rate control operations\n");
2864 return ieee80211_rate_control_register(&rs_4965_ops);
2865}
2866
2867void iwl4965_rate_control_unregister(void)
2868{
2869 ieee80211_rate_control_unregister(&rs_4965_ops);
2870}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
new file mode 100644
index 000000000000..b9fa2f6411a7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
@@ -0,0 +1,291 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-4965-calib.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41#include "iwl-4965-hw.h"
42#include "iwl-4965.h"
43
44void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
45 struct iwl_rx_mem_buffer *rxb)
46
47{
48 struct iwl_rx_packet *pkt = rxb_addr(rxb);
49 struct iwl_missed_beacon_notif *missed_beacon;
50
51 missed_beacon = &pkt->u.missed_beacon;
52 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
53 priv->missed_beacon_threshold) {
54 IWL_DEBUG_CALIB(priv,
55 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
56 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
57 le32_to_cpu(missed_beacon->total_missed_becons),
58 le32_to_cpu(missed_beacon->num_recvd_beacons),
59 le32_to_cpu(missed_beacon->num_expected_beacons));
60 if (!test_bit(STATUS_SCANNING, &priv->status))
61 iwl4965_init_sensitivity(priv);
62 }
63}
64
65/* Calculate noise level, based on measurements during network silence just
66 * before an arriving beacon. This measurement can be done only if we know
67 * exactly when to expect beacons, therefore only when we're associated. */
68static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
69{
70 struct statistics_rx_non_phy *rx_info;
71 int num_active_rx = 0;
72 int total_silence = 0;
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise;
75
76 rx_info = &(priv->_4965.statistics.rx.general);
77 bcn_silence_a =
78 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
79 bcn_silence_b =
80 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
81 bcn_silence_c =
82 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
83
84 if (bcn_silence_a) {
85 total_silence += bcn_silence_a;
86 num_active_rx++;
87 }
88 if (bcn_silence_b) {
89 total_silence += bcn_silence_b;
90 num_active_rx++;
91 }
92 if (bcn_silence_c) {
93 total_silence += bcn_silence_c;
94 num_active_rx++;
95 }
96
97	/* Average among active antennas; -107 converts to approximate dBm */
98 if (num_active_rx)
99 last_rx_noise = (total_silence / num_active_rx) - 107;
100 else
101 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
102
103 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
104 bcn_silence_a, bcn_silence_b, bcn_silence_c,
105 last_rx_noise);
106}
107
108#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
109/*
110 * Based on the assumption that all statistics counters are DWORDs.
111 * FIXME: this function is for debugging only and does not handle
112 * the case of counter roll-over.
113 */
114static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
115 __le32 *stats)
116{
117 int i, size;
118 __le32 *prev_stats;
119 u32 *accum_stats;
120 u32 *delta, *max_delta;
121 struct statistics_general_common *general, *accum_general;
122 struct statistics_tx *tx, *accum_tx;
123
124 prev_stats = (__le32 *)&priv->_4965.statistics;
125 accum_stats = (u32 *)&priv->_4965.accum_statistics;
126 size = sizeof(struct iwl_notif_statistics);
127 general = &priv->_4965.statistics.general.common;
128 accum_general = &priv->_4965.accum_statistics.general.common;
129 tx = &priv->_4965.statistics.tx;
130 accum_tx = &priv->_4965.accum_statistics.tx;
131 delta = (u32 *)&priv->_4965.delta_statistics;
132 max_delta = (u32 *)&priv->_4965.max_delta;
133
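	/*
	 * Walk both copies of the notification as flat arrays of __le32
	 * counters, accumulating only forward deltas; a counter that went
	 * backwards (e.g. after a uCode restart) is simply skipped.
	 */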
134 for (i = sizeof(__le32); i < size;
135 i += sizeof(__le32), stats++, prev_stats++, delta++,
136 max_delta++, accum_stats++) {
137 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
138 *delta = (le32_to_cpu(*stats) -
139 le32_to_cpu(*prev_stats));
140 *accum_stats += *delta;
141 if (*delta > *max_delta)
142 *max_delta = *delta;
143 }
144 }
145
146 /* reset accumulative statistics for "no-counter" type statistics */
147 accum_general->temperature = general->temperature;
148 accum_general->ttl_timestamp = general->ttl_timestamp;
149}
150#endif
151
152#define REG_RECALIB_PERIOD (60)
153
154/**
155 * iwl4965_good_plcp_health - check the PLCP error rate
156 *
157 * When the PLCP error rate exceeds the configured threshold, report bad
158 * health so that the radio is reset to restore throughput.
159 */
160bool iwl4965_good_plcp_health(struct iwl_priv *priv,
161 struct iwl_rx_packet *pkt)
162{
163 bool rc = true;
164 int combined_plcp_delta;
165 unsigned int plcp_msec;
166 unsigned long plcp_received_jiffies;
167
168 if (priv->cfg->base_params->plcp_delta_threshold ==
169 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
170 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
171 return rc;
172 }
173
174 /*
175 * check for plcp_err and trigger radio reset if it exceeds
176 * the plcp error threshold plcp_delta.
177 */
178 plcp_received_jiffies = jiffies;
179 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
180 (long) priv->plcp_jiffies);
181 priv->plcp_jiffies = plcp_received_jiffies;
182 /*
183 * check to make sure plcp_msec is not 0 to prevent division
184 * by zero.
185 */
186 if (plcp_msec) {
187 struct statistics_rx_phy *ofdm;
188 struct statistics_rx_ht_phy *ofdm_ht;
189
190 ofdm = &pkt->u.stats.rx.ofdm;
191 ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
192 combined_plcp_delta =
193 (le32_to_cpu(ofdm->plcp_err) -
194 le32_to_cpu(priv->_4965.statistics.
195 rx.ofdm.plcp_err)) +
196 (le32_to_cpu(ofdm_ht->plcp_err) -
197 le32_to_cpu(priv->_4965.statistics.
198 rx.ofdm_ht.plcp_err));
199
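		/*
		 * (combined_plcp_delta * 100) / plcp_msec normalizes the
		 * error delta to errors per 100 ms, which is the unit
		 * plcp_delta_threshold is compared against.
		 */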
200 if ((combined_plcp_delta > 0) &&
201 ((combined_plcp_delta * 100) / plcp_msec) >
202 priv->cfg->base_params->plcp_delta_threshold) {
203 /*
204 * if plcp_err exceed the threshold,
205 * the following data is printed in csv format:
206 * Text: plcp_err exceeded %d,
207 * Received ofdm.plcp_err,
208 * Current ofdm.plcp_err,
209 * Received ofdm_ht.plcp_err,
210 * Current ofdm_ht.plcp_err,
211 * combined_plcp_delta,
212 * plcp_msec
213 */
214 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
215 "%u, %u, %u, %u, %d, %u mSecs\n",
216 priv->cfg->base_params->plcp_delta_threshold,
217 le32_to_cpu(ofdm->plcp_err),
218				le32_to_cpu(priv->_4965.statistics.rx.ofdm.plcp_err),
219				le32_to_cpu(ofdm_ht->plcp_err),
220				le32_to_cpu(priv->_4965.statistics.rx.ofdm_ht.plcp_err),
221 combined_plcp_delta, plcp_msec);
222
223 rc = false;
224 }
225 }
226 return rc;
227}
228
229void iwl4965_rx_statistics(struct iwl_priv *priv,
230 struct iwl_rx_mem_buffer *rxb)
231{
232 int change;
233 struct iwl_rx_packet *pkt = rxb_addr(rxb);
234
235 IWL_DEBUG_RX(priv,
236 "Statistics notification received (%d vs %d).\n",
237 (int)sizeof(struct iwl_notif_statistics),
238 le32_to_cpu(pkt->len_n_flags) &
239 FH_RSCSR_FRAME_SIZE_MSK);
240
241 change = ((priv->_4965.statistics.general.common.temperature !=
242 pkt->u.stats.general.common.temperature) ||
243 ((priv->_4965.statistics.flag &
244 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
245 (pkt->u.stats.flag &
246 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
247#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
248 iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
249#endif
250
251 iwl_legacy_recover_from_statistics(priv, pkt);
252
253 memcpy(&priv->_4965.statistics, &pkt->u.stats,
254 sizeof(priv->_4965.statistics));
255
256 set_bit(STATUS_STATISTICS, &priv->status);
257
258 /* Reschedule the statistics timer to occur in
259 * REG_RECALIB_PERIOD seconds to ensure we get a
260 * thermal update even if the uCode doesn't give
261 * us one */
262 mod_timer(&priv->statistics_periodic, jiffies +
263 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
264
265 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
266 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
267 iwl4965_rx_calc_noise(priv);
268 queue_work(priv->workqueue, &priv->run_time_calib_work);
269 }
270 if (priv->cfg->ops->lib->temp_ops.temperature && change)
271 priv->cfg->ops->lib->temp_ops.temperature(priv);
272}
273
274void iwl4965_reply_statistics(struct iwl_priv *priv,
275 struct iwl_rx_mem_buffer *rxb)
276{
277 struct iwl_rx_packet *pkt = rxb_addr(rxb);
278
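	/* If the uCode reports that its statistics were cleared, also reset
	 * the accumulated/delta/max copies kept for debugfs so the two views
	 * stay consistent. */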
279 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
280#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
281 memset(&priv->_4965.accum_statistics, 0,
282 sizeof(struct iwl_notif_statistics));
283 memset(&priv->_4965.delta_statistics, 0,
284 sizeof(struct iwl_notif_statistics));
285 memset(&priv->_4965.max_delta, 0,
286 sizeof(struct iwl_notif_statistics));
287#endif
288 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
289 }
290 iwl4965_rx_statistics(priv, rxb);
291}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
new file mode 100644
index 000000000000..057da2c3bf95
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
@@ -0,0 +1,720 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-4965.h"
36
37static struct iwl_link_quality_cmd *
38iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
39{
40 int i, r;
41 struct iwl_link_quality_cmd *link_cmd;
42 u32 rate_flags = 0;
43 __le32 rate_n_flags;
44
45 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
46 if (!link_cmd) {
47 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
48 return NULL;
49 }
50	/* Set up the rate scaling to start at the selected rate, fall back
51 * all the way down to 1M in IEEE order, and then spin on 1M */
52 if (priv->band == IEEE80211_BAND_5GHZ)
53 r = IWL_RATE_6M_INDEX;
54 else
55 r = IWL_RATE_1M_INDEX;
56
57 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
58 rate_flags |= RATE_MCS_CCK_MSK;
59
60 rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
61 RATE_MCS_ANT_POS;
62 rate_n_flags = iwl4965_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
63 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
64 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
65
66 link_cmd->general_params.single_stream_ant_msk =
67 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
68
69 link_cmd->general_params.dual_stream_ant_msk =
70 priv->hw_params.valid_tx_ant &
71 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
72 if (!link_cmd->general_params.dual_stream_ant_msk) {
73 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
74 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
75 link_cmd->general_params.dual_stream_ant_msk =
76 priv->hw_params.valid_tx_ant;
77 }
78
79 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
80 link_cmd->agg_params.agg_time_limit =
81 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
82
83 link_cmd->sta_id = sta_id;
84
85 return link_cmd;
86}
87
88/*
89 * iwl4965_add_bssid_station - Add the special IBSS BSSID station
90 *
91 * Function sleeps.
92 */
93int
94iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
95 const u8 *addr, u8 *sta_id_r)
96{
97 int ret;
98 u8 sta_id;
99 struct iwl_link_quality_cmd *link_cmd;
100 unsigned long flags;
101
102 if (sta_id_r)
103 *sta_id_r = IWL_INVALID_STATION;
104
105 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
106 if (ret) {
107 IWL_ERR(priv, "Unable to add station %pM\n", addr);
108 return ret;
109 }
110
111 if (sta_id_r)
112 *sta_id_r = sta_id;
113
114 spin_lock_irqsave(&priv->sta_lock, flags);
115 priv->stations[sta_id].used |= IWL_STA_LOCAL;
116 spin_unlock_irqrestore(&priv->sta_lock, flags);
117
118 /* Set up default rate scaling table in device's station table */
119 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
120 if (!link_cmd) {
121 IWL_ERR(priv,
122 "Unable to initialize rate scaling for station %pM.\n",
123 addr);
124 return -ENOMEM;
125 }
126
127 ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
128 if (ret)
129 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
130
131 spin_lock_irqsave(&priv->sta_lock, flags);
132 priv->stations[sta_id].lq = link_cmd;
133 spin_unlock_irqrestore(&priv->sta_lock, flags);
134
135 return 0;
136}
137
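/*
 * Build the context's WEP key host command (REPLY_WEPKEY) carrying all
 * WEP_KEYS_MAX default key slots and send it synchronously; if every slot
 * is empty, the command is sent only when send_if_empty is set.
 */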
138static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
139 struct iwl_rxon_context *ctx,
140 bool send_if_empty)
141{
142 int i, not_empty = 0;
143 u8 buff[sizeof(struct iwl_wep_cmd) +
144 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
145 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
146 size_t cmd_size = sizeof(struct iwl_wep_cmd);
147 struct iwl_host_cmd cmd = {
148 .id = ctx->wep_key_cmd,
149 .data = wep_cmd,
150 .flags = CMD_SYNC,
151 };
152
153 might_sleep();
154
155 memset(wep_cmd, 0, cmd_size +
156 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
157
158 for (i = 0; i < WEP_KEYS_MAX ; i++) {
159 wep_cmd->key[i].key_index = i;
160 if (ctx->wep_keys[i].key_size) {
161 wep_cmd->key[i].key_offset = i;
162 not_empty = 1;
163 } else {
164 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
165 }
166
167 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
168 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
169 ctx->wep_keys[i].key_size);
170 }
171
172 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
173 wep_cmd->num_keys = WEP_KEYS_MAX;
174
175 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
176
177 cmd.len = cmd_size;
178
179 if (not_empty || send_if_empty)
180 return iwl_legacy_send_cmd(priv, &cmd);
181 else
182 return 0;
183}
184
185int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
186 struct iwl_rxon_context *ctx)
187{
188 lockdep_assert_held(&priv->mutex);
189
190 return iwl4965_static_wepkey_cmd(priv, ctx, false);
191}
192
193int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
194 struct iwl_rxon_context *ctx,
195 struct ieee80211_key_conf *keyconf)
196{
197 int ret;
198
199 lockdep_assert_held(&priv->mutex);
200
201 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
202 keyconf->keyidx);
203
204 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
205 if (iwl_legacy_is_rfkill(priv)) {
206 IWL_DEBUG_WEP(priv,
207 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
208 /* but keys in device are clear anyway so return success */
209 return 0;
210 }
211	ret = iwl4965_static_wepkey_cmd(priv, ctx, true);
212 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
213 keyconf->keyidx, ret);
214
215 return ret;
216}
217
218int iwl4965_set_default_wep_key(struct iwl_priv *priv,
219 struct iwl_rxon_context *ctx,
220 struct ieee80211_key_conf *keyconf)
221{
222 int ret;
223
224 lockdep_assert_held(&priv->mutex);
225
226 if (keyconf->keylen != WEP_KEY_LEN_128 &&
227 keyconf->keylen != WEP_KEY_LEN_64) {
228 IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
229 return -EINVAL;
230 }
231
232 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
233 keyconf->hw_key_idx = HW_KEY_DEFAULT;
234 priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
235
236 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
237 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
238 keyconf->keylen);
239
240 ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
241 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
242 keyconf->keylen, keyconf->keyidx, ret);
243
244 return ret;
245}
246
247static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
248 struct iwl_rxon_context *ctx,
249 struct ieee80211_key_conf *keyconf,
250 u8 sta_id)
251{
252 unsigned long flags;
253 __le16 key_flags = 0;
254 struct iwl_legacy_addsta_cmd sta_cmd;
255
256 lockdep_assert_held(&priv->mutex);
257
258 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
259
260 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
261 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
262 key_flags &= ~STA_KEY_FLG_INVALID;
263
264 if (keyconf->keylen == WEP_KEY_LEN_128)
265 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
266
267 if (sta_id == ctx->bcast_sta_id)
268 key_flags |= STA_KEY_MULTICAST_MSK;
269
270 spin_lock_irqsave(&priv->sta_lock, flags);
271
272 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
273 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
274 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
275
276 memcpy(priv->stations[sta_id].keyinfo.key,
277 keyconf->key, keyconf->keylen);
278
279 memcpy(&priv->stations[sta_id].sta.key.key[3],
280 keyconf->key, keyconf->keylen);
281
282 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
283 == STA_KEY_FLG_NO_ENC)
284 priv->stations[sta_id].sta.key.key_offset =
285 iwl_legacy_get_free_ucode_key_index(priv);
286	/* else, we are overriding an existing key => no need to allocate room
287 * in uCode. */
288
289 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
290 "no space for a new key");
291
292 priv->stations[sta_id].sta.key.key_flags = key_flags;
293 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
294 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
295
296 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
297 sizeof(struct iwl_legacy_addsta_cmd));
298 spin_unlock_irqrestore(&priv->sta_lock, flags);
299
300 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
301}
302
303static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
304 struct iwl_rxon_context *ctx,
305 struct ieee80211_key_conf *keyconf,
306 u8 sta_id)
307{
308 unsigned long flags;
309 __le16 key_flags = 0;
310 struct iwl_legacy_addsta_cmd sta_cmd;
311
312 lockdep_assert_held(&priv->mutex);
313
314 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
315 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
316 key_flags &= ~STA_KEY_FLG_INVALID;
317
318 if (sta_id == ctx->bcast_sta_id)
319 key_flags |= STA_KEY_MULTICAST_MSK;
320
321 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
322
323 spin_lock_irqsave(&priv->sta_lock, flags);
324 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
325 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
326
327 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
328 keyconf->keylen);
329
330 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
331 keyconf->keylen);
332
333 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
334 == STA_KEY_FLG_NO_ENC)
335 priv->stations[sta_id].sta.key.key_offset =
336 iwl_legacy_get_free_ucode_key_index(priv);
337	/* else, we are overriding an existing key => no need to allocate room
338 * in uCode. */
339
340 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
341 "no space for a new key");
342
343 priv->stations[sta_id].sta.key.key_flags = key_flags;
344 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
345 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
346
347 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
348 sizeof(struct iwl_legacy_addsta_cmd));
349 spin_unlock_irqrestore(&priv->sta_lock, flags);
350
351 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
352}
353
354static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
355 struct iwl_rxon_context *ctx,
356 struct ieee80211_key_conf *keyconf,
357 u8 sta_id)
358{
359 unsigned long flags;
360 int ret = 0;
361 __le16 key_flags = 0;
362
363 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
364 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
365 key_flags &= ~STA_KEY_FLG_INVALID;
366
367 if (sta_id == ctx->bcast_sta_id)
368 key_flags |= STA_KEY_MULTICAST_MSK;
369
370 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
371 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
372
373 spin_lock_irqsave(&priv->sta_lock, flags);
374
375 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
376 priv->stations[sta_id].keyinfo.keylen = 16;
377
378 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
379 == STA_KEY_FLG_NO_ENC)
380 priv->stations[sta_id].sta.key.key_offset =
381 iwl_legacy_get_free_ucode_key_index(priv);
382	/* else, we are overriding an existing key => no need to allocate room
383 * in uCode. */
384
385 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
386 "no space for a new key");
387
388 priv->stations[sta_id].sta.key.key_flags = key_flags;
389
390
391	/* This copy is actually not needed: we get the key with each TX */
392 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
393
394 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
395
396 spin_unlock_irqrestore(&priv->sta_lock, flags);
397
398 return ret;
399}
400
401void iwl4965_update_tkip_key(struct iwl_priv *priv,
402 struct iwl_rxon_context *ctx,
403 struct ieee80211_key_conf *keyconf,
404 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
405{
406 u8 sta_id;
407 unsigned long flags;
408 int i;
409
410 if (iwl_legacy_scan_cancel(priv)) {
411 /* cancel scan failed, just live w/ bad key and rely
412 briefly on SW decryption */
413 return;
414 }
415
416 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
417 if (sta_id == IWL_INVALID_STATION)
418 return;
419
420 spin_lock_irqsave(&priv->sta_lock, flags);
421
422 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
423
424 for (i = 0; i < 5; i++)
425 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
426 cpu_to_le16(phase1key[i]);
427
428 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
429 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
430
431 iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
432
433 spin_unlock_irqrestore(&priv->sta_lock, flags);
434
435}
436
437int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
438 struct iwl_rxon_context *ctx,
439 struct ieee80211_key_conf *keyconf,
440 u8 sta_id)
441{
442 unsigned long flags;
443 u16 key_flags;
444 u8 keyidx;
445 struct iwl_legacy_addsta_cmd sta_cmd;
446
447 lockdep_assert_held(&priv->mutex);
448
449 ctx->key_mapping_keys--;
450
451 spin_lock_irqsave(&priv->sta_lock, flags);
452 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
453 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
454
455 IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
456 keyconf->keyidx, sta_id);
457
458 if (keyconf->keyidx != keyidx) {
459		/* We need to remove a key with an index different from the one
460		 * in the uCode. This means that the key we need to remove has
461		 * been replaced by another one with a different index.
462		 * Don't do anything and return ok.
463 */
464 spin_unlock_irqrestore(&priv->sta_lock, flags);
465 return 0;
466 }
467
468 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
469 IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
470 keyconf->keyidx, key_flags);
471 spin_unlock_irqrestore(&priv->sta_lock, flags);
472 return 0;
473 }
474
475 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
476 &priv->ucode_key_table))
477 IWL_ERR(priv, "index %d not used in uCode key table.\n",
478 priv->stations[sta_id].sta.key.key_offset);
479 memset(&priv->stations[sta_id].keyinfo, 0,
480 sizeof(struct iwl_hw_key));
481 memset(&priv->stations[sta_id].sta.key, 0,
482 sizeof(struct iwl4965_keyinfo));
483 priv->stations[sta_id].sta.key.key_flags =
484 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
485 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
486 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
487 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
488
489 if (iwl_legacy_is_rfkill(priv)) {
490 IWL_DEBUG_WEP(priv,
491 "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
492 spin_unlock_irqrestore(&priv->sta_lock, flags);
493 return 0;
494 }
495 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
496 sizeof(struct iwl_legacy_addsta_cmd));
497 spin_unlock_irqrestore(&priv->sta_lock, flags);
498
499 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
500}
501
502int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
503 struct ieee80211_key_conf *keyconf, u8 sta_id)
504{
505 int ret;
506
507 lockdep_assert_held(&priv->mutex);
508
509 ctx->key_mapping_keys++;
510 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
511
512 switch (keyconf->cipher) {
513 case WLAN_CIPHER_SUITE_CCMP:
514 ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
515 keyconf, sta_id);
516 break;
517 case WLAN_CIPHER_SUITE_TKIP:
518 ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
519 keyconf, sta_id);
520 break;
521 case WLAN_CIPHER_SUITE_WEP40:
522 case WLAN_CIPHER_SUITE_WEP104:
523 ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
524 keyconf, sta_id);
525 break;
526 default:
527 IWL_ERR(priv,
528 "Unknown alg: %s cipher = %x\n", __func__,
529 keyconf->cipher);
530 ret = -EINVAL;
531 }
532
533 IWL_DEBUG_WEP(priv,
534 "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
535 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
536 sta_id, ret);
537
538 return ret;
539}
540
541/**
542 * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
543 *
544 * This adds the broadcast station into the driver's station table
545 * and marks it driver active, so that it will be restored to the
546 * device at the next best time.
547 */
548int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
549 struct iwl_rxon_context *ctx)
550{
551 struct iwl_link_quality_cmd *link_cmd;
552 unsigned long flags;
553 u8 sta_id;
554
555 spin_lock_irqsave(&priv->sta_lock, flags);
556 sta_id = iwl_legacy_prep_station(priv, ctx, iwl_bcast_addr,
557 false, NULL);
558 if (sta_id == IWL_INVALID_STATION) {
559 IWL_ERR(priv, "Unable to prepare broadcast station\n");
560 spin_unlock_irqrestore(&priv->sta_lock, flags);
561
562 return -EINVAL;
563 }
564
565 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
566 priv->stations[sta_id].used |= IWL_STA_BCAST;
567 spin_unlock_irqrestore(&priv->sta_lock, flags);
568
569 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
570 if (!link_cmd) {
571 IWL_ERR(priv,
572 "Unable to initialize rate scaling for bcast station.\n");
573 return -ENOMEM;
574 }
575
576 spin_lock_irqsave(&priv->sta_lock, flags);
577 priv->stations[sta_id].lq = link_cmd;
578 spin_unlock_irqrestore(&priv->sta_lock, flags);
579
580 return 0;
581}
582
583/**
584 * iwl4965_update_bcast_station - update broadcast station's LQ command
585 *
586 * Only used by iwl4965. Placed here to have all bcast station management
587 * code together.
588 */
589static int iwl4965_update_bcast_station(struct iwl_priv *priv,
590 struct iwl_rxon_context *ctx)
591{
592 unsigned long flags;
593 struct iwl_link_quality_cmd *link_cmd;
594 u8 sta_id = ctx->bcast_sta_id;
595
596 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
597 if (!link_cmd) {
598 IWL_ERR(priv,
599 "Unable to initialize rate scaling for bcast station.\n");
600 return -ENOMEM;
601 }
602
603 spin_lock_irqsave(&priv->sta_lock, flags);
604 if (priv->stations[sta_id].lq)
605 kfree(priv->stations[sta_id].lq);
606 else
607 IWL_DEBUG_INFO(priv,
608 "Bcast station rate scaling has not been initialized yet.\n");
609 priv->stations[sta_id].lq = link_cmd;
610 spin_unlock_irqrestore(&priv->sta_lock, flags);
611
612 return 0;
613}
614
615int iwl4965_update_bcast_stations(struct iwl_priv *priv)
616{
617 struct iwl_rxon_context *ctx;
618 int ret = 0;
619
620 for_each_context(priv, ctx) {
621 ret = iwl4965_update_bcast_station(priv, ctx);
622 if (ret)
623 break;
624 }
625
626 return ret;
627}
628
629/**
630 * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
631 */
632int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
633{
634 unsigned long flags;
635 struct iwl_legacy_addsta_cmd sta_cmd;
636
637 lockdep_assert_held(&priv->mutex);
638
639 /* Remove "disable" flag, to enable Tx for this TID */
640 spin_lock_irqsave(&priv->sta_lock, flags);
641 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
642 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
643 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
644 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
645 sizeof(struct iwl_legacy_addsta_cmd));
646 spin_unlock_irqrestore(&priv->sta_lock, flags);
647
648 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
649}
650
651int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
652 int tid, u16 ssn)
653{
654 unsigned long flags;
655 int sta_id;
656 struct iwl_legacy_addsta_cmd sta_cmd;
657
658 lockdep_assert_held(&priv->mutex);
659
660 sta_id = iwl_legacy_sta_id(sta);
661 if (sta_id == IWL_INVALID_STATION)
662 return -ENXIO;
663
664 spin_lock_irqsave(&priv->sta_lock, flags);
665 priv->stations[sta_id].sta.station_flags_msk = 0;
666 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
667 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
668 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
669 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
670 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
671 sizeof(struct iwl_legacy_addsta_cmd));
672 spin_unlock_irqrestore(&priv->sta_lock, flags);
673
674 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
675}
676
677int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
678 int tid)
679{
680 unsigned long flags;
681 int sta_id;
682 struct iwl_legacy_addsta_cmd sta_cmd;
683
684 lockdep_assert_held(&priv->mutex);
685
686 sta_id = iwl_legacy_sta_id(sta);
687 if (sta_id == IWL_INVALID_STATION) {
688 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
689 return -ENXIO;
690 }
691
692 spin_lock_irqsave(&priv->sta_lock, flags);
693 priv->stations[sta_id].sta.station_flags_msk = 0;
694 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
695 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
696 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
697 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
698 sizeof(struct iwl_legacy_addsta_cmd));
699 spin_unlock_irqrestore(&priv->sta_lock, flags);
700
701 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
702}
703
704void
705iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
706{
707 unsigned long flags;
708
709 spin_lock_irqsave(&priv->sta_lock, flags);
710 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
711 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
712 priv->stations[sta_id].sta.sta.modify_mask =
713 STA_MODIFY_SLEEP_TX_COUNT_MSK;
714 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
715 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
716 iwl_legacy_send_add_sta(priv,
717 &priv->stations[sta_id].sta, CMD_ASYNC);
718 spin_unlock_irqrestore(&priv->sta_lock, flags);
719
720}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
new file mode 100644
index 000000000000..2e0f0dbf87ec
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -0,0 +1,1359 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-4965-hw.h"
41#include "iwl-4965.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
48 * Mac80211 uses the following numbers, which we get from it
49 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
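/*
 * For example, a QoS data frame with TID 5 maps to IEEE80211_AC_VI in the
 * table below, to the corresponding hw queue via the identity AC->queue
 * mapping described above, and to the FIFO ctx->ac_to_fifo[IEEE80211_AC_VI].
 */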
69static const u8 tid_to_ac[] = {
70 IEEE80211_AC_BE,
71 IEEE80211_AC_BK,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BE,
74 IEEE80211_AC_VI,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VO,
77 IEEE80211_AC_VO
78};
79
80static inline int iwl4965_get_ac_from_tid(u16 tid)
81{
82 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
83 return tid_to_ac[tid];
84
85 /* no support for TIDs 8-15 yet */
86 return -EINVAL;
87}
88
89static inline int
90iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
98
99/*
100 * Build the basic part of the REPLY_TX host command.
101 */
102static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
103 struct sk_buff *skb,
104 struct iwl_tx_cmd *tx_cmd,
105 struct ieee80211_tx_info *info,
106 struct ieee80211_hdr *hdr,
107 u8 std_id)
108{
109 __le16 fc = hdr->frame_control;
110 __le32 tx_flags = tx_cmd->tx_flags;
111
112 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
113 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
114 tx_flags |= TX_CMD_FLG_ACK_MSK;
115 if (ieee80211_is_mgmt(fc))
116 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
117 if (ieee80211_is_probe_resp(fc) &&
118 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
119 tx_flags |= TX_CMD_FLG_TSF_MSK;
120 } else {
121 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
122 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
123 }
124
125 if (ieee80211_is_back_req(fc))
126 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
127
128 tx_cmd->sta_id = std_id;
129 if (ieee80211_has_morefrags(fc))
130 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
131
132 if (ieee80211_is_data_qos(fc)) {
133 u8 *qc = ieee80211_get_qos_ctl(hdr);
134 tx_cmd->tid_tspec = qc[0] & 0xf;
135 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
136 } else {
137 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
138 }
139
140 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
141
142 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
143 if (ieee80211_is_mgmt(fc)) {
144 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
145 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
146 else
147 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
148 } else {
149 tx_cmd->timeout.pm_frame_timeout = 0;
150 }
151
152 tx_cmd->driver_txop = 0;
153 tx_cmd->tx_flags = tx_flags;
154 tx_cmd->next_frame_len = 0;
155}
156
157#define RTS_DFAULT_RETRY_LIMIT 60
158
159static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
160 struct iwl_tx_cmd *tx_cmd,
161 struct ieee80211_tx_info *info,
162 __le16 fc)
163{
164 u32 rate_flags;
165 int rate_idx;
166 u8 rts_retry_limit;
167 u8 data_retry_limit;
168 u8 rate_plcp;
169
170	/* Set retry limit on DATA packets and Probe Responses */
171 if (ieee80211_is_probe_resp(fc))
172 data_retry_limit = 3;
173 else
174 data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
175 tx_cmd->data_retry_limit = data_retry_limit;
176
177 /* Set retry limit on RTS packets */
178 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
179 if (data_retry_limit < rts_retry_limit)
180 rts_retry_limit = data_retry_limit;
181 tx_cmd->rts_retry_limit = rts_retry_limit;
182
183 /* DATA packets will use the uCode station table for rate/antenna
184 * selection */
185 if (ieee80211_is_data(fc)) {
186 tx_cmd->initial_rate_index = 0;
187 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
188 return;
189 }
190
191 /**
192 * If the current TX rate stored in mac80211 has the MCS bit set, it's
193 * not really a TX rate. Thus, we use the lowest supported rate for
194 * this band. Also use the lowest supported rate if the stored rate
195 * index is invalid.
196 */
197 rate_idx = info->control.rates[0].idx;
198 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
199 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
200 rate_idx = rate_lowest_index(&priv->bands[info->band],
201 info->control.sta);
202 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
203 if (info->band == IEEE80211_BAND_5GHZ)
204 rate_idx += IWL_FIRST_OFDM_RATE;
205 /* Get PLCP rate for tx_cmd->rate_n_flags */
206 rate_plcp = iwl_rates[rate_idx].plcp;
207 /* Zero out flags for this packet */
208 rate_flags = 0;
209
210 /* Set CCK flag as needed */
211 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
212 rate_flags |= RATE_MCS_CCK_MSK;
213
214 /* Set up antennas */
215 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
216 priv->hw_params.valid_tx_ant);
217
218 rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
219
220 /* Set the rate in the TX cmd */
221 tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
222}
223
224static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
225 struct ieee80211_tx_info *info,
226 struct iwl_tx_cmd *tx_cmd,
227 struct sk_buff *skb_frag,
228 int sta_id)
229{
230 struct ieee80211_key_conf *keyconf = info->control.hw_key;
231
232 switch (keyconf->cipher) {
233 case WLAN_CIPHER_SUITE_CCMP:
234 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
235 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
236 if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
238 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
239 break;
240
241 case WLAN_CIPHER_SUITE_TKIP:
242 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
243 ieee80211_get_tkip_key(keyconf, skb_frag,
244 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
245 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
246 break;
247
248 case WLAN_CIPHER_SUITE_WEP104:
249 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
250 /* fall through */
251 case WLAN_CIPHER_SUITE_WEP40:
252 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
253 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
254
255 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
256
257 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
258 "with key %d\n", keyconf->keyidx);
259 break;
260
261 default:
262 IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
263 break;
264 }
265}
266
267/*
268 * start REPLY_TX command process
269 */
270int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
271{
272 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
273 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
274 struct ieee80211_sta *sta = info->control.sta;
275 struct iwl_station_priv *sta_priv = NULL;
276 struct iwl_tx_queue *txq;
277 struct iwl_queue *q;
278 struct iwl_device_cmd *out_cmd;
279 struct iwl_cmd_meta *out_meta;
280 struct iwl_tx_cmd *tx_cmd;
281 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
282 int txq_id;
283 dma_addr_t phys_addr;
284 dma_addr_t txcmd_phys;
285 dma_addr_t scratch_phys;
286 u16 len, firstlen, secondlen;
287 u16 seq_number = 0;
288 __le16 fc;
289 u8 hdr_len;
290 u8 sta_id;
291 u8 wait_write_ptr = 0;
292 u8 tid = 0;
293 u8 *qc = NULL;
294 unsigned long flags;
295 bool is_agg = false;
296
297 if (info->control.vif)
298 ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
299
300 spin_lock_irqsave(&priv->lock, flags);
301 if (iwl_legacy_is_rfkill(priv)) {
302 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
303 goto drop_unlock;
304 }
305
306 fc = hdr->frame_control;
307
308#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
309 if (ieee80211_is_auth(fc))
310 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
311 else if (ieee80211_is_assoc_req(fc))
312 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
313 else if (ieee80211_is_reassoc_req(fc))
314 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
315#endif
316
317 hdr_len = ieee80211_hdrlen(fc);
318
319 /* Find index into station table for destination station */
320 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
321 if (sta_id == IWL_INVALID_STATION) {
322 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
323 hdr->addr1);
324 goto drop_unlock;
325 }
326
327 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
328
329 if (sta)
330 sta_priv = (void *)sta->drv_priv;
331
332 if (sta_priv && sta_priv->asleep &&
333 (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
334 /*
335 * This sends an asynchronous command to the device,
336 * but we can rely on it being processed before the
337 * next frame is processed -- and the next frame to
338 * this station is the one that will consume this
339 * counter.
340 * For now set the counter to just 1 since we do not
341 * support uAPSD yet.
342 */
343 iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
344 }
345
346 /*
347 * Send this frame after DTIM -- there's a special queue
348 * reserved for this for contexts that support AP mode.
349 */
350 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
351 txq_id = ctx->mcast_queue;
352 /*
353 * The microcode will clear the more data
354 * bit in the last frame it transmits.
355 */
356 hdr->frame_control |=
357 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
358 } else
359 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
360
361 /* irqs already disabled/saved above when locking priv->lock */
362 spin_lock(&priv->sta_lock);
363
364 if (ieee80211_is_data_qos(fc)) {
365 qc = ieee80211_get_qos_ctl(hdr);
366 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
367 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
368 spin_unlock(&priv->sta_lock);
369 goto drop_unlock;
370 }
371 seq_number = priv->stations[sta_id].tid[tid].seq_number;
372 seq_number &= IEEE80211_SCTL_SEQ;
373 hdr->seq_ctrl = hdr->seq_ctrl &
374 cpu_to_le16(IEEE80211_SCTL_FRAG);
375 hdr->seq_ctrl |= cpu_to_le16(seq_number);
376 seq_number += 0x10;
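 /* (the 802.11 sequence number occupies bits 4-15 of seq_ctrl, so
  * adding 0x10 advances the stored counter by exactly one frame) */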
377 /* aggregation is on for this <sta,tid> */
378 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
379 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
380 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
381 is_agg = true;
382 }
383 }
384
385 txq = &priv->txq[txq_id];
386 q = &txq->q;
387
388 if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
389 spin_unlock(&priv->sta_lock);
390 goto drop_unlock;
391 }
392
393 if (ieee80211_is_data_qos(fc)) {
394 priv->stations[sta_id].tid[tid].tfds_in_queue++;
395 if (!ieee80211_has_morefrags(fc))
396 priv->stations[sta_id].tid[tid].seq_number = seq_number;
397 }
398
399 spin_unlock(&priv->sta_lock);
400
401 /* Set up driver data for this TFD */
402 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
403 txq->txb[q->write_ptr].skb = skb;
404 txq->txb[q->write_ptr].ctx = ctx;
405
406 /* Set up first empty entry in queue's array of Tx/cmd buffers */
407 out_cmd = txq->cmd[q->write_ptr];
408 out_meta = &txq->meta[q->write_ptr];
409 tx_cmd = &out_cmd->cmd.tx;
410 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
411 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
412
413 /*
414 * Set up the Tx-command (not MAC!) header.
415 * Store the chosen Tx queue and TFD index within the sequence field;
416 * after Tx, uCode's Tx response will return this value so driver can
417 * locate the frame within the tx queue and do post-tx processing.
418 */
419 out_cmd->hdr.cmd = REPLY_TX;
420 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
421 INDEX_TO_SEQ(q->write_ptr)));
422
423 /* Copy MAC header from skb into command buffer */
424 memcpy(tx_cmd->hdr, hdr, hdr_len);
425
426
427 /* Total # bytes to be transmitted */
428 len = (u16)skb->len;
429 tx_cmd->len = cpu_to_le16(len);
430
431 if (info->control.hw_key)
432 iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
433
434 /* TODO need this for burst mode later on */
435 iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
436 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
437
438 iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
439
440 iwl_legacy_update_stats(priv, true, fc, len);
441 /*
442 * Use the first empty entry in this queue's command buffer array
443 * to contain the Tx command and MAC header concatenated together
444 * (payload data will be in another buffer).
445 * Size of this varies, due to varying MAC header length.
446 * If end is not dword aligned, we'll have 2 extra bytes at the end
447 * of the MAC header (device reads on dword boundaries).
448 * We'll tell device about this padding later.
449 */
450 len = sizeof(struct iwl_tx_cmd) +
451 sizeof(struct iwl_cmd_header) + hdr_len;
452 firstlen = (len + 3) & ~3;
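 /* ((len + 3) & ~3 rounds len up to the next multiple of 4, e.g. a
  * 58-byte command+header becomes firstlen = 60; the 2 pad bytes are
  * reported to the device just below) */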
453
454 /* Tell NIC about any 2-byte padding after MAC header */
455 if (firstlen != len)
456 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
457
458 /* Physical address of this Tx command's header (not MAC header!),
459 * within command buffer array. */
460 txcmd_phys = pci_map_single(priv->pci_dev,
461 &out_cmd->hdr, firstlen,
462 PCI_DMA_BIDIRECTIONAL);
463 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
464 dma_unmap_len_set(out_meta, len, firstlen);
465 /* Add buffer containing Tx command and MAC(!) header to TFD's
466 * first entry */
467 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
468 txcmd_phys, firstlen, 1, 0);
469
470 if (!ieee80211_has_morefrags(hdr->frame_control)) {
471 txq->need_update = 1;
472 } else {
473 wait_write_ptr = 1;
474 txq->need_update = 0;
475 }
476
477 /* Set up TFD's 2nd entry to point directly to remainder of skb,
478 * if any (802.11 null frames have no payload). */
479 secondlen = skb->len - hdr_len;
480 if (secondlen > 0) {
481 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
482 secondlen, PCI_DMA_TODEVICE);
483 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
484 phys_addr, secondlen,
485 0, 0);
486 }
487
488 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
489 offsetof(struct iwl_tx_cmd, scratch);
490
491 /* take back ownership of DMA buffer to enable update */
492 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
493 firstlen, PCI_DMA_BIDIRECTIONAL);
494 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
495 tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
496
497 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
498 le16_to_cpu(out_cmd->hdr.sequence));
499 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
500 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
501 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
502
503 /* Set up entry for this TFD in Tx byte-count array */
504 if (info->flags & IEEE80211_TX_CTL_AMPDU)
505 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
506 le16_to_cpu(tx_cmd->len));
507
508 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
509 firstlen, PCI_DMA_BIDIRECTIONAL);
510
511 trace_iwlwifi_legacy_dev_tx(priv,
512 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
513 sizeof(struct iwl_tfd),
514 &out_cmd->hdr, firstlen,
515 skb->data + hdr_len, secondlen);
516
517 /* Tell device the write index *just past* this latest filled TFD */
518 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
519 iwl_legacy_txq_update_write_ptr(priv, txq);
520 spin_unlock_irqrestore(&priv->lock, flags);
521
522 /*
523 * At this point the frame is "transmitted" successfully
524 * and we will get a TX status notification eventually,
525 * regardless of the value of ret. "ret" only indicates
526 * whether or not we should update the write pointer.
527 */
528
529 /*
530 * Avoid atomic ops if it isn't an associated client.
531 * Also, if this is a packet for aggregation, don't
532 * increase the counter because the ucode will stop
533 * aggregation queues when their respective station
534 * goes to sleep.
535 */
536 if (sta_priv && sta_priv->client && !is_agg)
537 atomic_inc(&sta_priv->pending_frames);
538
539 if ((iwl_legacy_queue_space(q) < q->high_mark) &&
540 priv->mac80211_registered) {
541 if (wait_write_ptr) {
542 spin_lock_irqsave(&priv->lock, flags);
543 txq->need_update = 1;
544 iwl_legacy_txq_update_write_ptr(priv, txq);
545 spin_unlock_irqrestore(&priv->lock, flags);
546 } else {
547 iwl_legacy_stop_queue(priv, txq);
548 }
549 }
550
551 return 0;
552
553drop_unlock:
554 spin_unlock_irqrestore(&priv->lock, flags);
555 return -1;
556}
557
558static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
559 struct iwl_dma_ptr *ptr, size_t size)
560{
561 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
562 GFP_KERNEL);
563 if (!ptr->addr)
564 return -ENOMEM;
565 ptr->size = size;
566 return 0;
567}
568
569static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
570 struct iwl_dma_ptr *ptr)
571{
572 if (unlikely(!ptr->addr))
573 return;
574
575 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
576 memset(ptr, 0, sizeof(*ptr));
577}
578
579/**
580 * iwl4965_hw_txq_ctx_free - Free TXQ Context
581 *
582 * Destroy all TX DMA queues and structures
583 */
584void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
585{
586 int txq_id;
587
588 /* Tx queues */
589 if (priv->txq) {
590 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
591 if (txq_id == priv->cmd_queue)
592 iwl_legacy_cmd_queue_free(priv);
593 else
594 iwl_legacy_tx_queue_free(priv, txq_id);
595 }
596 iwl4965_free_dma_ptr(priv, &priv->kw);
597
598 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
599
600 /* free tx queue structure */
601 iwl_legacy_txq_mem(priv);
602}
603
604/**
605 * iwl4965_txq_ctx_alloc - allocate TX queue context
606 * Allocate all Tx DMA structures and initialize them
607 *
608 * @param priv
609 * @return error code
610 */
611int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
612{
613 int ret;
614 int txq_id, slots_num;
615 unsigned long flags;
616
617 /* Free all tx/cmd queues and keep-warm buffer */
618 iwl4965_hw_txq_ctx_free(priv);
619
620 ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
621 priv->hw_params.scd_bc_tbls_size);
622 if (ret) {
623 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
624 goto error_bc_tbls;
625 }
626 /* Alloc keep-warm buffer */
627 ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
628 if (ret) {
629 IWL_ERR(priv, "Keep Warm allocation failed\n");
630 goto error_kw;
631 }
632
633 /* allocate tx queue structure */
634 ret = iwl_legacy_alloc_txq_mem(priv);
635 if (ret)
636 goto error;
637
638 spin_lock_irqsave(&priv->lock, flags);
639
640 /* Turn off all Tx DMA fifos */
641 iwl4965_txq_set_sched(priv, 0);
642
643 /* Tell NIC where to find the "keep warm" buffer */
644 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
645
646 spin_unlock_irqrestore(&priv->lock, flags);
647
648 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
649 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
650 slots_num = (txq_id == priv->cmd_queue) ?
651 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
652 ret = iwl_legacy_tx_queue_init(priv,
653 &priv->txq[txq_id], slots_num,
654 txq_id);
655 if (ret) {
656 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
657 goto error;
658 }
659 }
660
661 return ret;
662
663 error:
664 iwl4965_hw_txq_ctx_free(priv);
665 iwl4965_free_dma_ptr(priv, &priv->kw);
666 error_kw:
667 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
668 error_bc_tbls:
669 return ret;
670}
671
672void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
673{
674 int txq_id, slots_num;
675 unsigned long flags;
676
677 spin_lock_irqsave(&priv->lock, flags);
678
679 /* Turn off all Tx DMA fifos */
680 iwl4965_txq_set_sched(priv, 0);
681
682 /* Tell NIC where to find the "keep warm" buffer */
683 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
684
685 spin_unlock_irqrestore(&priv->lock, flags);
686
687 /* Alloc and init all Tx queues, including the command queue (#4) */
688 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
689 slots_num = txq_id == priv->cmd_queue ?
690 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
691 iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
692 slots_num, txq_id);
693 }
694}
695
696/**
697 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
698 */
699void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
700{
701 int ch;
702 unsigned long flags;
703
704 /* Turn off all Tx DMA fifos */
705 spin_lock_irqsave(&priv->lock, flags);
706
707 iwl4965_txq_set_sched(priv, 0);
708
709 /* Stop each Tx DMA channel, and wait for it to be idle */
710 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
711 iwl_legacy_write_direct32(priv,
712 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
713 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
714 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
715 1000))
716 IWL_ERR(priv, "Failing on timeout while stopping"
717 " DMA channel %d [0x%08x]", ch,
718 iwl_legacy_read_direct32(priv,
719 FH_TSSR_TX_STATUS_REG));
720 }
721 spin_unlock_irqrestore(&priv->lock, flags);
722}
723
724/*
725 * Find first available (lowest unused) Tx Queue, mark it "active".
726 * Called only when finding queue for aggregation.
727 * Should never return anything < 7, because they should already
728 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
729 */
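/* Illustrative walk-through: with bits 0-3 (EDCA), 4 (command) and 5-6
 * (reserved) already set in txq_ctx_active_msk, the first clear bit found
 * below is 7, so the first aggregation session gets queue 7, the next
 * one queue 8, and so on up to hw_params.max_txq_num - 1. */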
730static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
731{
732 int txq_id;
733
734 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
735 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
736 return txq_id;
737 return -1;
738}
739
740/**
741 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
742 */
743static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
744 u16 txq_id)
745{
746 /* Simply stop the queue, but don't change any configuration;
747 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
748 iwl_legacy_write_prph(priv,
749 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
750 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
751 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
752}
753
754/**
755 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
756 */
757static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
758 u16 txq_id)
759{
760 u32 tbl_dw_addr;
761 u32 tbl_dw;
762 u16 scd_q2ratid;
763
764 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
765
766 tbl_dw_addr = priv->scd_base_addr +
767 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
768
769 tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
770
771 if (txq_id & 0x1)
772 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
773 else
774 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
775
776 iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
777
778 return 0;
779}
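/* Packing example for the table write above: the translate table keeps two
 * queues per 32-bit word, so an even txq_id lands in the low halfword and
 * an odd txq_id in the high halfword, each holding its RA/TID value
 * (BUILD_RAxTID(sta_id, tid) at the call site) masked to the RATID field. */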
780
781/**
782 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
783 *
784 * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
785 * i.e. it must be one of the higher queues used for aggregation
786 */
787static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
788 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
789{
790 unsigned long flags;
791 u16 ra_tid;
792 int ret;
793
794 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
795 (IWL49_FIRST_AMPDU_QUEUE +
796 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
797 IWL_WARN(priv,
798 "queue number out of range: %d, must be %d to %d\n",
799 txq_id, IWL49_FIRST_AMPDU_QUEUE,
800 IWL49_FIRST_AMPDU_QUEUE +
801 priv->cfg->base_params->num_of_ampdu_queues - 1);
802 return -EINVAL;
803 }
804
805 ra_tid = BUILD_RAxTID(sta_id, tid);
806
807 /* Modify device's station table to Tx this TID */
808 ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
809 if (ret)
810 return ret;
811
812 spin_lock_irqsave(&priv->lock, flags);
813
814 /* Stop this Tx queue before configuring it */
815 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
816
817 /* Map receiver-address / traffic-ID to this queue */
818 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
819
820 /* Set this queue as a chain-building queue */
821 iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
822
823 /* Place first TFD at index corresponding to start sequence number.
824 * Assumes that ssn_idx is valid (!= 0xFFF) */
825 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
826 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
827 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
828
829 /* Set up Tx window size and frame limit for this queue */
830 iwl_legacy_write_targ_mem(priv,
831 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
832 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
833 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
834
835 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
836 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
837 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
838 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
839
840 iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
841
842 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
843 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
844
845 spin_unlock_irqrestore(&priv->lock, flags);
846
847 return 0;
848}
849
850
851int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
852 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
853{
854 int sta_id;
855 int tx_fifo;
856 int txq_id;
857 int ret;
858 unsigned long flags;
859 struct iwl_tid_data *tid_data;
860
861 tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
862 if (unlikely(tx_fifo < 0))
863 return tx_fifo;
864
865 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
866 __func__, sta->addr, tid);
867
868 sta_id = iwl_legacy_sta_id(sta);
869 if (sta_id == IWL_INVALID_STATION) {
870 IWL_ERR(priv, "Start AGG on invalid station\n");
871 return -ENXIO;
872 }
873 if (unlikely(tid >= MAX_TID_COUNT))
874 return -EINVAL;
875
876 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
877 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
878 return -ENXIO;
879 }
880
881 txq_id = iwl4965_txq_ctx_activate_free(priv);
882 if (txq_id == -1) {
883 IWL_ERR(priv, "No free aggregation queue available\n");
884 return -ENXIO;
885 }
886
887 spin_lock_irqsave(&priv->sta_lock, flags);
888 tid_data = &priv->stations[sta_id].tid[tid];
889 *ssn = SEQ_TO_SN(tid_data->seq_number);
890 tid_data->agg.txq_id = txq_id;
891 iwl_legacy_set_swq_id(&priv->txq[txq_id],
892 iwl4965_get_ac_from_tid(tid), txq_id);
893 spin_unlock_irqrestore(&priv->sta_lock, flags);
894
895 ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
896 sta_id, tid, *ssn);
897 if (ret)
898 return ret;
899
900 spin_lock_irqsave(&priv->sta_lock, flags);
901 tid_data = &priv->stations[sta_id].tid[tid];
902 if (tid_data->tfds_in_queue == 0) {
903 IWL_DEBUG_HT(priv, "HW queue is empty\n");
904 tid_data->agg.state = IWL_AGG_ON;
905 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
906 } else {
907 IWL_DEBUG_HT(priv,
908 "HW queue is NOT empty: %d packets in HW queue\n",
909 tid_data->tfds_in_queue);
910 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
911 }
912 spin_unlock_irqrestore(&priv->sta_lock, flags);
913 return ret;
914}
915
916/**
917 * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
918 * priv->lock must be held by the caller
919 */
920static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
921 u16 ssn_idx, u8 tx_fifo)
922{
923 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
924 (IWL49_FIRST_AMPDU_QUEUE +
925 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
926 IWL_WARN(priv,
927 "queue number out of range: %d, must be %d to %d\n",
928 txq_id, IWL49_FIRST_AMPDU_QUEUE,
929 IWL49_FIRST_AMPDU_QUEUE +
930 priv->cfg->base_params->num_of_ampdu_queues - 1);
931 return -EINVAL;
932 }
933
934 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
935
936 iwl_legacy_clear_bits_prph(priv,
937 IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
938
939 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
940 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
941 /* assumes that ssn_idx is valid (!= 0xFFF) */
942 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
943
944 iwl_legacy_clear_bits_prph(priv,
945 IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
946 iwl_txq_ctx_deactivate(priv, txq_id);
947 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
948
949 return 0;
950}
951
952int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
953 struct ieee80211_sta *sta, u16 tid)
954{
955 int tx_fifo_id, txq_id, sta_id, ssn;
956 struct iwl_tid_data *tid_data;
957 int write_ptr, read_ptr;
958 unsigned long flags;
959
960 tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
961 if (unlikely(tx_fifo_id < 0))
962 return tx_fifo_id;
963
964 sta_id = iwl_legacy_sta_id(sta);
965
966 if (sta_id == IWL_INVALID_STATION) {
967 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
968 return -ENXIO;
969 }
970
971 spin_lock_irqsave(&priv->sta_lock, flags);
972
973 tid_data = &priv->stations[sta_id].tid[tid];
974 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
975 txq_id = tid_data->agg.txq_id;
976
977 switch (priv->stations[sta_id].tid[tid].agg.state) {
978 case IWL_EMPTYING_HW_QUEUE_ADDBA:
979 /*
980 * This can happen if the peer stops aggregation
981 * again before we've had a chance to drain the
982 * queue we selected previously, i.e. before the
983 * session was really started completely.
984 */
985 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
986 goto turn_off;
987 case IWL_AGG_ON:
988 break;
989 default:
990 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
991 }
992
993 write_ptr = priv->txq[txq_id].q.write_ptr;
994 read_ptr = priv->txq[txq_id].q.read_ptr;
995
996 /* The queue is not empty */
997 if (write_ptr != read_ptr) {
998 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
999 priv->stations[sta_id].tid[tid].agg.state =
1000 IWL_EMPTYING_HW_QUEUE_DELBA;
1001 spin_unlock_irqrestore(&priv->sta_lock, flags);
1002 return 0;
1003 }
1004
1005 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1006 turn_off:
1007 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1008
1009 /* do not restore/save irqs */
1010 spin_unlock(&priv->sta_lock);
1011 spin_lock(&priv->lock);
1012
1013 /*
1014 * The only reason this call can fail is a queue number out of range,
1015 * which can happen if the uCode is reloaded and all the station
1016 * information is lost. If it is outside the range, there is no need
1017 * to deactivate the uCode queue; just return "success" to allow
1018 * mac80211 to clean up its own data.
1019 */
1020 iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
1021 spin_unlock_irqrestore(&priv->lock, flags);
1022
1023 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1024
1025 return 0;
1026}
1027
1028int iwl4965_txq_check_empty(struct iwl_priv *priv,
1029 int sta_id, u8 tid, int txq_id)
1030{
1031 struct iwl_queue *q = &priv->txq[txq_id].q;
1032 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1033 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1034 struct iwl_rxon_context *ctx;
1035
1036 ctx = &priv->contexts[priv->stations[sta_id].ctxid];
1037
1038 lockdep_assert_held(&priv->sta_lock);
1039
1040 switch (priv->stations[sta_id].tid[tid].agg.state) {
1041 case IWL_EMPTYING_HW_QUEUE_DELBA:
1042 /* We are reclaiming the last packet of the */
1043 /* aggregated HW queue */
1044 if ((txq_id == tid_data->agg.txq_id) &&
1045 (q->read_ptr == q->write_ptr)) {
1046 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1047 int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
1048 IWL_DEBUG_HT(priv,
1049 "HW queue empty: continue DELBA flow\n");
1050 iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
1051 tid_data->agg.state = IWL_AGG_OFF;
1052 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1053 }
1054 break;
1055 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1056 /* We are reclaiming the last packet of the queue */
1057 if (tid_data->tfds_in_queue == 0) {
1058 IWL_DEBUG_HT(priv,
1059 "HW queue empty: continue ADDBA flow\n");
1060 tid_data->agg.state = IWL_AGG_ON;
1061 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1062 }
1063 break;
1064 }
1065
1066 return 0;
1067}
1068
1069static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
1070 struct iwl_rxon_context *ctx,
1071 const u8 *addr1)
1072{
1073 struct ieee80211_sta *sta;
1074 struct iwl_station_priv *sta_priv;
1075
1076 rcu_read_lock();
1077 sta = ieee80211_find_sta(ctx->vif, addr1);
1078 if (sta) {
1079 sta_priv = (void *)sta->drv_priv;
1080 /* avoid atomic ops if this isn't a client */
1081 if (sta_priv->client &&
1082 atomic_dec_return(&sta_priv->pending_frames) == 0)
1083 ieee80211_sta_block_awake(priv->hw, sta, false);
1084 }
1085 rcu_read_unlock();
1086}
1087
1088static void
1089iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1090 bool is_agg)
1091{
1092 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1093
1094 if (!is_agg)
1095 iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1096
1097 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1098}
1099
1100int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1101{
1102 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1103 struct iwl_queue *q = &txq->q;
1104 struct iwl_tx_info *tx_info;
1105 int nfreed = 0;
1106 struct ieee80211_hdr *hdr;
1107
1108 if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
1109 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1110 "is out of range [0-%d] %d %d.\n", txq_id,
1111 index, q->n_bd, q->write_ptr, q->read_ptr);
1112 return 0;
1113 }
1114
1115 for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
1116 q->read_ptr != index;
1117 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1118
1119 tx_info = &txq->txb[txq->q.read_ptr];
1120 iwl4965_tx_status(priv, tx_info,
1121 txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
1122
1123 hdr = (struct ieee80211_hdr *)tx_info->skb->data;
1124 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
1125 nfreed++;
1126 tx_info->skb = NULL;
1127
1128 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1129 }
1130 return nfreed;
1131}
1132
1133/**
1134 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
1135 *
1136 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1137 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1138 */
1139static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1140 struct iwl_ht_agg *agg,
1141 struct iwl_compressed_ba_resp *ba_resp)
1142
1143{
1144 int i, sh, ack;
1145 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1146 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1147 int successes = 0;
1148 struct ieee80211_tx_info *info;
1149 u64 bitmap, sent_bitmap;
1150
1151 if (unlikely(!agg->wait_for_ba)) {
1152 if (unlikely(ba_resp->bitmap))
1153 IWL_ERR(priv, "Received BA when not expected\n");
1154 return -EINVAL;
1155 }
1156
1157 /* Mark that the expected block-ack response arrived */
1158 agg->wait_for_ba = 0;
1159 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
1160 ba_resp->seq_ctl);
1161
1162 /* Calculate shift to align block-ack bits with our Tx window bits */
1163 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
164 if (sh < 0) /* something is wrong with the indices */
1165 sh += 0x100;
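 /* e.g. if the BA window starts 2 frames before agg->start_idx, sh = 2 and
  * the shift below lines up bit 0 of the BA bitmap with the first frame of
  * the driver's Tx window; adding 0x100 handles wrap-around of the
  * 256-entry queue index space. */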
1166
1167 if (agg->frame_count > (64 - sh)) {
1168 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1169 return -1;
1170 }
1171
1172 /* don't use 64-bit values for now */
1173 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1174
1175 /* check for success or failure according to the
1176 * transmitted bitmap and block-ack bitmap */
1177 sent_bitmap = bitmap & agg->bitmap;
1178
1179 /* For each frame attempted in aggregation,
1180 * update driver's record of tx frame's status. */
1181 i = 0;
1182 while (sent_bitmap) {
1183 ack = sent_bitmap & 1ULL;
1184 successes += ack;
1185 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1186 ack ? "ACK" : "NACK", i,
1187 (agg->start_idx + i) & 0xff,
1188 agg->start_idx + i);
1189 sent_bitmap >>= 1;
1190 ++i;
1191 }
1192
1193 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
1194 (unsigned long long)bitmap);
1195
1196 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
1197 memset(&info->status, 0, sizeof(info->status));
1198 info->flags |= IEEE80211_TX_STAT_ACK;
1199 info->flags |= IEEE80211_TX_STAT_AMPDU;
1200 info->status.ampdu_ack_len = successes;
1201 info->status.ampdu_len = agg->frame_count;
1202 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1203
1204 return 0;
1205}
1206
1207/**
1208 * translate ucode response to mac80211 tx status control values
1209 */
1210void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1211 struct ieee80211_tx_info *info)
1212{
1213 struct ieee80211_tx_rate *r = &info->control.rates[0];
1214
1215 info->antenna_sel_tx =
1216 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1217 if (rate_n_flags & RATE_MCS_HT_MSK)
1218 r->flags |= IEEE80211_TX_RC_MCS;
1219 if (rate_n_flags & RATE_MCS_GF_MSK)
1220 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1221 if (rate_n_flags & RATE_MCS_HT40_MSK)
1222 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1223 if (rate_n_flags & RATE_MCS_DUP_MSK)
1224 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1225 if (rate_n_flags & RATE_MCS_SGI_MSK)
1226 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1227 r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1228}
1229
1230/**
1231 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1232 *
1233 * Handles block-acknowledge notification from device, which reports success
1234 * of frames sent via aggregation.
1235 */
1236void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
1237 struct iwl_rx_mem_buffer *rxb)
1238{
1239 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1240 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1241 struct iwl_tx_queue *txq = NULL;
1242 struct iwl_ht_agg *agg;
1243 int index;
1244 int sta_id;
1245 int tid;
1246 unsigned long flags;
1247
1248 /* "flow" corresponds to Tx queue */
1249 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1250
1251 /* "ssn" is start of block-ack Tx window, corresponds to index
1252 * (in Tx queue's circular buffer) of first TFD/frame in window */
1253 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1254
1255 if (scd_flow >= priv->hw_params.max_txq_num) {
1256 IWL_ERR(priv,
1257 "BUG_ON scd_flow is bigger than number of queues\n");
1258 return;
1259 }
1260
1261 txq = &priv->txq[scd_flow];
1262 sta_id = ba_resp->sta_id;
1263 tid = ba_resp->tid;
1264 agg = &priv->stations[sta_id].tid[tid].agg;
1265 if (unlikely(agg->txq_id != scd_flow)) {
1266 /*
1267 * FIXME: this is a uCode bug which needs to be addressed;
1268 * log the information and return for now.
1269 * Since this can happen very often, and in order
1270 * not to fill the syslog, don't enable the logging by default.
1271 */
1272 IWL_DEBUG_TX_REPLY(priv,
1273 "BA scd_flow %d does not match txq_id %d\n",
1274 scd_flow, agg->txq_id);
1275 return;
1276 }
1277
1278 /* Find index just before block-ack window */
1279 index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1280
1281 spin_lock_irqsave(&priv->sta_lock, flags);
1282
1283 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1284 "sta_id = %d\n",
1285 agg->wait_for_ba,
1286 (u8 *) &ba_resp->sta_addr_lo32,
1287 ba_resp->sta_id);
1288 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
1289 "scd_flow = "
1290 "%d, scd_ssn = %d\n",
1291 ba_resp->tid,
1292 ba_resp->seq_ctl,
1293 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1294 ba_resp->scd_flow,
1295 ba_resp->scd_ssn);
1296 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
1297 agg->start_idx,
1298 (unsigned long long)agg->bitmap);
1299
1300 /* Update driver's record of ACK vs. not for each frame in window */
1301 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1302
1303 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1304 * block-ack window (we assume that they've been successfully
1305 * transmitted ... if not, it's too late anyway). */
1306 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1307 /* calculate mac80211 ampdu sw queue to wake */
1308 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
1309 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
1310
1311 if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
1312 priv->mac80211_registered &&
1313 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1314 iwl_legacy_wake_queue(priv, txq);
1315
1316 iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
1317 }
1318
1319 spin_unlock_irqrestore(&priv->sta_lock, flags);
1320}
1321
1322#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1323const char *iwl4965_get_tx_fail_reason(u32 status)
1324{
1325#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1326#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1327
1328 switch (status & TX_STATUS_MSK) {
1329 case TX_STATUS_SUCCESS:
1330 return "SUCCESS";
1331 TX_STATUS_POSTPONE(DELAY);
1332 TX_STATUS_POSTPONE(FEW_BYTES);
1333 TX_STATUS_POSTPONE(QUIET_PERIOD);
1334 TX_STATUS_POSTPONE(CALC_TTAK);
1335 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1336 TX_STATUS_FAIL(SHORT_LIMIT);
1337 TX_STATUS_FAIL(LONG_LIMIT);
1338 TX_STATUS_FAIL(FIFO_UNDERRUN);
1339 TX_STATUS_FAIL(DRAIN_FLOW);
1340 TX_STATUS_FAIL(RFKILL_FLUSH);
1341 TX_STATUS_FAIL(LIFE_EXPIRE);
1342 TX_STATUS_FAIL(DEST_PS);
1343 TX_STATUS_FAIL(HOST_ABORTED);
1344 TX_STATUS_FAIL(BT_RETRY);
1345 TX_STATUS_FAIL(STA_INVALID);
1346 TX_STATUS_FAIL(FRAG_DROPPED);
1347 TX_STATUS_FAIL(TID_DISABLE);
1348 TX_STATUS_FAIL(FIFO_FLUSHED);
1349 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
1350 TX_STATUS_FAIL(PASSIVE_NO_RX);
1351 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
1352 }
1353
1354 return "UNKNOWN";
1355
1356#undef TX_STATUS_FAIL
1357#undef TX_STATUS_POSTPONE
1358}
1359#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
new file mode 100644
index 000000000000..001d148feb94
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
@@ -0,0 +1,166 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-4965-calib.h"
42
43#define IWL_AC_UNSET -1
44
45/**
46 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
47 * using sample data 100 bytes apart. If these sample points are good,
48 * it's a pretty good bet that everything between them is good, too.
49 */
50static int
51iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
52{
53 u32 val;
54 int ret = 0;
55 u32 errcnt = 0;
56 u32 i;
57
58 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
59
60 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
61 /* read data comes through single port, auto-incr addr */
62 /* NOTE: Use the debugless read so we don't flood kernel log
63 * if IWL_DL_IO is set */
64 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
65 i + IWL4965_RTC_INST_LOWER_BOUND);
66 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
67 if (val != le32_to_cpu(*image)) {
68 ret = -EIO;
69 errcnt++;
70 if (errcnt >= 3)
71 break;
72 }
73 }
74
75 return ret;
76}
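/* Rough cost of the sparse check above: each loop step advances 100 bytes
 * (100/sizeof(u32) = 25 words) and compares a single word, so a 40000-byte
 * image is sampled at ~400 of its 10000 words; the full word-by-word
 * comparison below is kept as a diagnostic fallback. */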
77
78/**
79 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
80 * looking at all data.
81 */
82static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
83 u32 len)
84{
85 u32 val;
86 u32 save_len = len;
87 int ret = 0;
88 u32 errcnt;
89
90 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
91
92 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
93 IWL4965_RTC_INST_LOWER_BOUND);
94
95 errcnt = 0;
96 for (; len > 0; len -= sizeof(u32), image++) {
97 /* read data comes through single port, auto-incr addr */
98 /* NOTE: Use the debugless read so we don't flood kernel log
99 * if IWL_DL_IO is set */
100 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
101 if (val != le32_to_cpu(*image)) {
102 IWL_ERR(priv, "uCode INST section is invalid at "
103 "offset 0x%x, is 0x%x, s/b 0x%x\n",
104 save_len - len, val, le32_to_cpu(*image));
105 ret = -EIO;
106 errcnt++;
107 if (errcnt >= 20)
108 break;
109 }
110 }
111
112 if (!errcnt)
113 IWL_DEBUG_INFO(priv,
114 "ucode image in INSTRUCTION memory is good\n");
115
116 return ret;
117}
118
119/**
120 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
121 * and verify its contents
122 */
123int iwl4965_verify_ucode(struct iwl_priv *priv)
124{
125 __le32 *image;
126 u32 len;
127 int ret;
128
129 /* Try bootstrap */
130 image = (__le32 *)priv->ucode_boot.v_addr;
131 len = priv->ucode_boot.len;
132 ret = iwl4965_verify_inst_sparse(priv, image, len);
133 if (!ret) {
134 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
135 return 0;
136 }
137
138 /* Try initialize */
139 image = (__le32 *)priv->ucode_init.v_addr;
140 len = priv->ucode_init.len;
141 ret = iwl4965_verify_inst_sparse(priv, image, len);
142 if (!ret) {
143 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
144 return 0;
145 }
146
147 /* Try runtime/protocol */
148 image = (__le32 *)priv->ucode_code.v_addr;
149 len = priv->ucode_code.len;
150 ret = iwl4965_verify_inst_sparse(priv, image, len);
151 if (!ret) {
152 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
153 return 0;
154 }
155
156 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
157
158 /* Since nothing seems to match, show first several data entries in
159 * instruction SRAM, so maybe visual inspection will give a clue.
160 * Selection of bootstrap image (vs. other images) is arbitrary. */
161 image = (__le32 *)priv->ucode_boot.v_addr;
162 len = priv->ucode_boot.len;
163 ret = iwl4965_verify_inst_full(priv, image, len);
164
165 return ret;
166}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index 8998ed134d1a..080444c89022 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -43,12 +43,11 @@
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-agn-calib.h" 46#include "iwl-4965-calib.h"
47#include "iwl-sta.h" 47#include "iwl-sta.h"
48#include "iwl-agn-led.h" 48#include "iwl-4965-led.h"
49#include "iwl-agn.h" 49#include "iwl-4965.h"
50#include "iwl-agn-debugfs.h" 50#include "iwl-4965-debugfs.h"
51#include "iwl-legacy.h"
52 51
53static int iwl4965_send_tx_power(struct iwl_priv *priv); 52static int iwl4965_send_tx_power(struct iwl_priv *priv);
54static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 53static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -74,11 +73,11 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv)
74 IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); 73 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
75 74
76 /* verify BSM SRAM contents */ 75 /* verify BSM SRAM contents */
77 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); 76 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
78 for (reg = BSM_SRAM_LOWER_BOUND; 77 for (reg = BSM_SRAM_LOWER_BOUND;
79 reg < BSM_SRAM_LOWER_BOUND + len; 78 reg < BSM_SRAM_LOWER_BOUND + len;
80 reg += sizeof(u32), image++) { 79 reg += sizeof(u32), image++) {
81 val = iwl_read_prph(priv, reg); 80 val = iwl_legacy_read_prph(priv, reg);
82 if (val != le32_to_cpu(*image)) { 81 if (val != le32_to_cpu(*image)) {
83 IWL_ERR(priv, "BSM uCode verification failed at " 82 IWL_ERR(priv, "BSM uCode verification failed at "
84 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", 83 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -158,33 +157,34 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
158 inst_len = priv->ucode_init.len; 157 inst_len = priv->ucode_init.len;
159 data_len = priv->ucode_init_data.len; 158 data_len = priv->ucode_init_data.len;
160 159
161 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 160 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
162 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 161 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
163 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); 162 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
164 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); 163 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
165 164
166 /* Fill BSM memory with bootstrap instructions */ 165 /* Fill BSM memory with bootstrap instructions */
167 for (reg_offset = BSM_SRAM_LOWER_BOUND; 166 for (reg_offset = BSM_SRAM_LOWER_BOUND;
168 reg_offset < BSM_SRAM_LOWER_BOUND + len; 167 reg_offset < BSM_SRAM_LOWER_BOUND + len;
169 reg_offset += sizeof(u32), image++) 168 reg_offset += sizeof(u32), image++)
170 _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image)); 169 _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
171 170
172 ret = iwl4965_verify_bsm(priv); 171 ret = iwl4965_verify_bsm(priv);
173 if (ret) 172 if (ret)
174 return ret; 173 return ret;
175 174
176 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ 175 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
177 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); 176 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
178 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND); 177 iwl_legacy_write_prph(priv,
179 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); 178 BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
179 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
180 180
181 /* Load bootstrap code into instruction SRAM now, 181 /* Load bootstrap code into instruction SRAM now,
182 * to prepare to load "initialize" uCode */ 182 * to prepare to load "initialize" uCode */
183 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); 183 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
184 184
185 /* Wait for load of bootstrap uCode to finish */ 185 /* Wait for load of bootstrap uCode to finish */
186 for (i = 0; i < 100; i++) { 186 for (i = 0; i < 100; i++) {
187 done = iwl_read_prph(priv, BSM_WR_CTRL_REG); 187 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
188 if (!(done & BSM_WR_CTRL_REG_BIT_START)) 188 if (!(done & BSM_WR_CTRL_REG_BIT_START))
189 break; 189 break;
190 udelay(10); 190 udelay(10);
@@ -198,7 +198,8 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
198 198
199 /* Enable future boot loads whenever power management unit triggers it 199 /* Enable future boot loads whenever power management unit triggers it
200 * (e.g. when powering back up after power-save shutdown) */ 200 * (e.g. when powering back up after power-save shutdown) */
201 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); 201 iwl_legacy_write_prph(priv,
202 BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
202 203
203 204
204 return 0; 205 return 0;
@@ -224,14 +225,14 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
224 pdata = priv->ucode_data_backup.p_addr >> 4; 225 pdata = priv->ucode_data_backup.p_addr >> 4;
225 226
226 /* Tell bootstrap uCode where to find image to load */ 227 /* Tell bootstrap uCode where to find image to load */
227 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 228 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
228 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 229 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
229 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 230 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
230 priv->ucode_data.len); 231 priv->ucode_data.len);
231 232
232 /* Inst byte count must be last to set up, bit 31 signals uCode 233 /* Inst byte count must be last to set up, bit 31 signals uCode
233 * that all new ptr/size info is in place */ 234 * that all new ptr/size info is in place */
234 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 235 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
235 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 236 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
236 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); 237 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
237 238
@@ -254,7 +255,7 @@ static void iwl4965_init_alive_start(struct iwl_priv *priv)
254 /* Bootstrap uCode has loaded initialize uCode ... verify inst image. 255 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
255 * This is a paranoid check, because we would not have gotten the 256 * This is a paranoid check, because we would not have gotten the
256 * "initialize" alive if code weren't properly loaded. */ 257 * "initialize" alive if code weren't properly loaded. */
257 if (iwl_verify_ucode(priv)) { 258 if (iwl4965_verify_ucode(priv)) {
258 /* Runtime instruction load was bad; 259 /* Runtime instruction load was bad;
259 * take it all the way back down so we can try again */ 260 * take it all the way back down so we can try again */
260 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); 261 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
@@ -280,7 +281,7 @@ restart:
280 queue_work(priv->workqueue, &priv->restart); 281 queue_work(priv->workqueue, &priv->restart);
281} 282}
282 283
283static bool is_ht40_channel(__le32 rxon_flags) 284static bool iw4965_is_ht40_channel(__le32 rxon_flags)
284{ 285{
285 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) 286 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
286 >> RXON_FLG_CHANNEL_MODE_POS; 287 >> RXON_FLG_CHANNEL_MODE_POS;
@@ -288,23 +289,6 @@ static bool is_ht40_channel(__le32 rxon_flags)
288 (chan_mod == CHANNEL_MODE_MIXED)); 289 (chan_mod == CHANNEL_MODE_MIXED));
289} 290}
290 291
291/*
292 * EEPROM handlers
293 */
294static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
295{
296 return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
297}
298
299/*
300 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
301 * must be called under priv->lock and mac access
302 */
303static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
304{
305 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
306}
307
308static void iwl4965_nic_config(struct iwl_priv *priv) 292static void iwl4965_nic_config(struct iwl_priv *priv)
309{ 293{
310 unsigned long flags; 294 unsigned long flags;
@@ -312,22 +296,23 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
312 296
313 spin_lock_irqsave(&priv->lock, flags); 297 spin_lock_irqsave(&priv->lock, flags);
314 298
315 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 299 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
316 300
317 /* write radio config values to register */ 301 /* write radio config values to register */
318 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) 302 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
319 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 303 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
320 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | 304 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
321 EEPROM_RF_CFG_STEP_MSK(radio_cfg) | 305 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
322 EEPROM_RF_CFG_DASH_MSK(radio_cfg)); 306 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
323 307
324 /* set CSR_HW_CONFIG_REG for uCode use */ 308 /* set CSR_HW_CONFIG_REG for uCode use */
325 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 309 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
326 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 310 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
327 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 311 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
328 312
329 priv->calib_info = (struct iwl_eeprom_calib_info *) 313 priv->calib_info = (struct iwl_eeprom_calib_info *)
330 iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET); 314 iwl_legacy_eeprom_query_addr(priv,
315 EEPROM_4965_CALIB_TXPOWER_OFFSET);
331 316
332 spin_unlock_irqrestore(&priv->lock, flags); 317 spin_unlock_irqrestore(&priv->lock, flags);
333} 318}
@@ -340,7 +325,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
340 struct iwl_chain_noise_data *data = &(priv->chain_noise_data); 325 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
341 326
342 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 327 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
343 iwl_is_any_associated(priv)) { 328 iwl_legacy_is_any_associated(priv)) {
344 struct iwl_calib_diff_gain_cmd cmd; 329 struct iwl_calib_diff_gain_cmd cmd;
345 330
346 /* clear data for chain noise calibration algorithm */ 331 /* clear data for chain noise calibration algorithm */
@@ -357,7 +342,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
357 cmd.diff_gain_a = 0; 342 cmd.diff_gain_a = 0;
358 cmd.diff_gain_b = 0; 343 cmd.diff_gain_b = 0;
359 cmd.diff_gain_c = 0; 344 cmd.diff_gain_c = 0;
360 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 345 if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
361 sizeof(cmd), &cmd)) 346 sizeof(cmd), &cmd))
362 IWL_ERR(priv, 347 IWL_ERR(priv,
363 "Could not send REPLY_PHY_CALIBRATION_CMD\n"); 348 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
@@ -366,237 +351,6 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
366 } 351 }
367} 352}
368 353
369static void iwl4965_gain_computation(struct iwl_priv *priv,
370 u32 *average_noise,
371 u16 min_average_noise_antenna_i,
372 u32 min_average_noise,
373 u8 default_chain)
374{
375 int i, ret;
376 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
377
378 data->delta_gain_code[min_average_noise_antenna_i] = 0;
379
380 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
381 s32 delta_g = 0;
382
383 if (!(data->disconn_array[i]) &&
384 (data->delta_gain_code[i] ==
385 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
386 delta_g = average_noise[i] - min_average_noise;
387 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
388 data->delta_gain_code[i] =
389 min(data->delta_gain_code[i],
390 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
391
392 data->delta_gain_code[i] =
393 (data->delta_gain_code[i] | (1 << 2));
394 } else {
395 data->delta_gain_code[i] = 0;
396 }
397 }
398 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
399 data->delta_gain_code[0],
400 data->delta_gain_code[1],
401 data->delta_gain_code[2]);
402
403 /* Differential gain gets sent to uCode only once */
404 if (!data->radio_write) {
405 struct iwl_calib_diff_gain_cmd cmd;
406 data->radio_write = 1;
407
408 memset(&cmd, 0, sizeof(cmd));
409 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
410 cmd.diff_gain_a = data->delta_gain_code[0];
411 cmd.diff_gain_b = data->delta_gain_code[1];
412 cmd.diff_gain_c = data->delta_gain_code[2];
413 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
414 sizeof(cmd), &cmd);
415 if (ret)
416 IWL_DEBUG_CALIB(priv, "fail sending cmd "
417 "REPLY_PHY_CALIBRATION_CMD\n");
418
419 /* TODO we might want recalculate
420 * rx_chain in rxon cmd */
421
422 /* Mark so we run this algo only once! */
423 data->state = IWL_CHAIN_NOISE_CALIBRATED;
424 }
425}
426
427static void iwl4965_bg_txpower_work(struct work_struct *work)
428{
429 struct iwl_priv *priv = container_of(work, struct iwl_priv,
430 txpower_work);
431
432 /* If a scan happened to start before we got here
433 * then just return; the statistics notification will
434 * kick off another scheduled work to compensate for
435 * any temperature delta we missed here. */
436 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
437 test_bit(STATUS_SCANNING, &priv->status))
438 return;
439
440 mutex_lock(&priv->mutex);
441
442 /* Regardless of if we are associated, we must reconfigure the
443 * TX power since frames can be sent on non-radar channels while
444 * not associated */
445 iwl4965_send_tx_power(priv);
446
447 /* Update last_temperature to keep is_calib_needed from running
448 * when it isn't needed... */
449 priv->last_temperature = priv->temperature;
450
451 mutex_unlock(&priv->mutex);
452}
453
454/*
455 * Acquire priv->lock before calling this function !
456 */
457static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
458{
459 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
460 (index & 0xff) | (txq_id << 8));
461 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
462}
463
464/**
465 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
466 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
467 * @scd_retry: (1) Indicates queue will be used in aggregation mode
468 *
469 * NOTE: Acquire priv->lock before calling this function !
470 */
471static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
472 struct iwl_tx_queue *txq,
473 int tx_fifo_id, int scd_retry)
474{
475 int txq_id = txq->q.id;
476
477 /* Find out whether to activate Tx queue */
478 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
479
480 /* Set up and activate */
481 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
482 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
483 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
484 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
485 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
486 IWL49_SCD_QUEUE_STTS_REG_MSK);
487
488 txq->sched_retry = scd_retry;
489
490 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
491 active ? "Activate" : "Deactivate",
492 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
493}
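/*
 * Illustration (not driver code): the status word written above is a
 * plain bitfield.  Activating queue 2 on FIFO 1 with scd_retry = 0
 * produces
 *
 *	(1 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
 *	(1 << IWL49_SCD_QUEUE_STTS_REG_POS_TXF)    |
 *	(0 << IWL49_SCD_QUEUE_STTS_REG_POS_WSL)    |
 *	(0 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK)|
 *	IWL49_SCD_QUEUE_STTS_REG_MSK
 *
 * i.e. "Activate AC Queue 2 on AC 1" in the debug message that follows.
 */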
494
495static const s8 default_queue_to_tx_fifo[] = {
496 IWL_TX_FIFO_VO,
497 IWL_TX_FIFO_VI,
498 IWL_TX_FIFO_BE,
499 IWL_TX_FIFO_BK,
500 IWL49_CMD_FIFO_NUM,
501 IWL_TX_FIFO_UNUSED,
502 IWL_TX_FIFO_UNUSED,
503};
504
505static int iwl4965_alive_notify(struct iwl_priv *priv)
506{
507 u32 a;
508 unsigned long flags;
509 int i, chan;
510 u32 reg_val;
511
512 spin_lock_irqsave(&priv->lock, flags);
513
514 	/* Clear 4965's internal Tx Scheduler database */
515 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
516 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
517 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
518 iwl_write_targ_mem(priv, a, 0);
519 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
520 iwl_write_targ_mem(priv, a, 0);
521 for (; a < priv->scd_base_addr +
522 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
523 iwl_write_targ_mem(priv, a, 0);
524
525 	/* Tell 4965 where to find Tx byte count tables */
526 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
527 priv->scd_bc_tbls.dma >> 10);
528
529 	/* Enable DMA channels */
530 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
531 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
532 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
533 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
534
535 /* Update FH chicken bits */
536 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
537 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
538 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
539
540 /* Disable chain mode for all queues */
541 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
542
543 /* Initialize each Tx queue (including the command queue) */
544 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
545
546 /* TFD circular buffer read/write indexes */
547 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
548 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
549
550 /* Max Tx Window size for Scheduler-ACK mode */
551 iwl_write_targ_mem(priv, priv->scd_base_addr +
552 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
553 (SCD_WIN_SIZE <<
554 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
555 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
556
557 /* Frame limit */
558 iwl_write_targ_mem(priv, priv->scd_base_addr +
559 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
560 sizeof(u32),
561 (SCD_FRAME_LIMIT <<
562 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
563 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
564
565 }
566 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
567 (1 << priv->hw_params.max_txq_num) - 1);
568
569 /* Activate all Tx DMA/FIFO channels */
570 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
571
572 iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
573
574 	/* make sure all queues are not stopped */
575 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
576 for (i = 0; i < 4; i++)
577 atomic_set(&priv->queue_stop_count[i], 0);
578
579 	/* reset to 0 to enable all queues first */
580 priv->txq_ctx_active_msk = 0;
581 /* Map each Tx/cmd queue to its corresponding fifo */
582 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
583
584 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
585 int ac = default_queue_to_tx_fifo[i];
586
587 iwl_txq_ctx_activate(priv, i);
588
589 if (ac == IWL_TX_FIFO_UNUSED)
590 continue;
591
592 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
593 }
594
595 spin_unlock_irqrestore(&priv->lock, flags);
596
597 return 0;
598}
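/*
 * Illustration (assumes IWL_MASK(lo, hi) sets bits lo..hi inclusive):
 * IWL_MASK(0, 6) == 0x7f hands all seven Tx DMA/FIFO channels to the
 * scheduler, and the final loop binds Tx queues 0-3 to the VO/VI/BE/BK
 * FIFOs and queue 4 to the command FIFO, leaving queues 5 and 6
 * activated but marked IWL_TX_FIFO_UNUSED.
 */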
599
600static struct iwl_sensitivity_ranges iwl4965_sensitivity = { 354static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
601 .min_nrg_cck = 97, 355 .min_nrg_cck = 97,
602 .max_nrg_cck = 0, /* not used, set to 0 */ 356 .max_nrg_cck = 0, /* not used, set to 0 */
@@ -658,15 +412,15 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
658 412
659 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 413 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
660 414
661 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 415 priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
662 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); 416 priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
663 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 417 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
664 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 418 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
665 419
666 iwl4965_set_ct_threshold(priv); 420 iwl4965_set_ct_threshold(priv);
667 421
668 priv->hw_params.sens = &iwl4965_sensitivity; 422 priv->hw_params.sens = &iwl4965_sensitivity;
669 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 423 priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
670 424
671 return 0; 425 return 0;
672} 426}
@@ -1150,9 +904,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1150 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band, 904 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
1151 is_ht40); 905 is_ht40);
1152 906
1153 ch_info = iwl_get_channel_info(priv, priv->band, channel); 907 ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
1154 908
1155 if (!is_channel_valid(ch_info)) 909 if (!iwl_legacy_is_channel_valid(ch_info))
1156 return -EINVAL; 910 return -EINVAL;
1157 911
1158 /* get txatten group, used to select 1) thermal txpower adjustment 912 /* get txatten group, used to select 1) thermal txpower adjustment
@@ -1376,7 +1130,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1376 1130
1377 band = priv->band == IEEE80211_BAND_2GHZ; 1131 band = priv->band == IEEE80211_BAND_2GHZ;
1378 1132
1379 is_ht40 = is_ht40_channel(ctx->active.flags); 1133 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1380 1134
1381 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) 1135 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1382 ctrl_chan_high = 1; 1136 ctrl_chan_high = 1;
@@ -1390,7 +1144,8 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1390 if (ret) 1144 if (ret)
1391 goto out; 1145 goto out;
1392 1146
1393 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); 1147 ret = iwl_legacy_send_cmd_pdu(priv,
1148 REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1394 1149
1395out: 1150out:
1396 return ret; 1151 return ret;
@@ -1401,8 +1156,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1401{ 1156{
1402 int ret = 0; 1157 int ret = 0;
1403 struct iwl4965_rxon_assoc_cmd rxon_assoc; 1158 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1404 const struct iwl_rxon_cmd *rxon1 = &ctx->staging; 1159 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1405 const struct iwl_rxon_cmd *rxon2 = &ctx->active; 1160 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1406 1161
1407 if ((rxon1->flags == rxon2->flags) && 1162 if ((rxon1->flags == rxon2->flags) &&
1408 (rxon1->filter_flags == rxon2->filter_flags) && 1163 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1428,7 +1183,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1428 ctx->staging.ofdm_ht_dual_stream_basic_rates; 1183 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1429 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; 1184 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1430 1185
1431 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, 1186 ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1432 sizeof(rxon_assoc), &rxon_assoc, NULL); 1187 sizeof(rxon_assoc), &rxon_assoc, NULL);
1433 if (ret) 1188 if (ret)
1434 return ret; 1189 return ret;
@@ -1439,12 +1194,12 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1439static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 1194static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1440{ 1195{
1441 /* cast away the const for active_rxon in this function */ 1196 /* cast away the const for active_rxon in this function */
1442 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active; 1197 struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
1443 int ret; 1198 int ret;
1444 bool new_assoc = 1199 bool new_assoc =
1445 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); 1200 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1446 1201
1447 if (!iwl_is_alive(priv)) 1202 if (!iwl_legacy_is_alive(priv))
1448 return -EBUSY; 1203 return -EBUSY;
1449 1204
1450 if (!ctx->is_active) 1205 if (!ctx->is_active)
@@ -1453,7 +1208,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1453 /* always get timestamp with Rx frame */ 1208 /* always get timestamp with Rx frame */
1454 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; 1209 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1455 1210
1456 ret = iwl_check_rxon_cmd(priv, ctx); 1211 ret = iwl_legacy_check_rxon_cmd(priv, ctx);
1457 if (ret) { 1212 if (ret) {
1458 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 1213 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1459 return -EINVAL; 1214 return -EINVAL;
@@ -1467,21 +1222,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1467 (priv->switch_rxon.channel != ctx->staging.channel)) { 1222 (priv->switch_rxon.channel != ctx->staging.channel)) {
1468 IWL_DEBUG_11H(priv, "abort channel switch on %d\n", 1223 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1469 le16_to_cpu(priv->switch_rxon.channel)); 1224 le16_to_cpu(priv->switch_rxon.channel));
1470 iwl_chswitch_done(priv, false); 1225 iwl_legacy_chswitch_done(priv, false);
1471 } 1226 }
1472 1227
1473 /* If we don't need to send a full RXON, we can use 1228 /* If we don't need to send a full RXON, we can use
1474 * iwl_rxon_assoc_cmd which is used to reconfigure filter 1229 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1475 * and other flags for the current radio configuration. */ 1230 * and other flags for the current radio configuration. */
1476 if (!iwl_full_rxon_required(priv, ctx)) { 1231 if (!iwl_legacy_full_rxon_required(priv, ctx)) {
1477 ret = iwl_send_rxon_assoc(priv, ctx); 1232 ret = iwl_legacy_send_rxon_assoc(priv, ctx);
1478 if (ret) { 1233 if (ret) {
1479 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); 1234 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1480 return ret; 1235 return ret;
1481 } 1236 }
1482 1237
1483 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1238 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1484 iwl_print_rx_config_cmd(priv, ctx); 1239 iwl_legacy_print_rx_config_cmd(priv, ctx);
1485 return 0; 1240 return 0;
1486 } 1241 }
1487 1242
@@ -1489,12 +1244,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1489 * an RXON_ASSOC and the new config wants the associated mask enabled, 1244 * an RXON_ASSOC and the new config wants the associated mask enabled,
1490 * we must clear the associated from the active configuration 1245 * we must clear the associated from the active configuration
1491 * before we apply the new config */ 1246 * before we apply the new config */
1492 if (iwl_is_associated_ctx(ctx) && new_assoc) { 1247 if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
1493 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); 1248 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1494 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1249 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1495 1250
1496 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, 1251 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1497 sizeof(struct iwl_rxon_cmd), 1252 sizeof(struct iwl_legacy_rxon_cmd),
1498 active_rxon); 1253 active_rxon);
1499 1254
1500 /* If the mask clearing failed then we set 1255 /* If the mask clearing failed then we set
@@ -1504,9 +1259,9 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1504 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); 1259 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
1505 return ret; 1260 return ret;
1506 } 1261 }
1507 iwl_clear_ucode_stations(priv, ctx); 1262 iwl_legacy_clear_ucode_stations(priv, ctx);
1508 iwl_restore_stations(priv, ctx); 1263 iwl_legacy_restore_stations(priv, ctx);
1509 ret = iwl_restore_default_wep_keys(priv, ctx); 1264 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1510 if (ret) { 1265 if (ret) {
1511 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); 1266 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1512 return ret; 1267 return ret;
@@ -1521,24 +1276,25 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1521 le16_to_cpu(ctx->staging.channel), 1276 le16_to_cpu(ctx->staging.channel),
1522 ctx->staging.bssid_addr); 1277 ctx->staging.bssid_addr);
1523 1278
1524 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto); 1279 iwl_legacy_set_rxon_hwcrypto(priv, ctx,
1280 !priv->cfg->mod_params->sw_crypto);
1525 1281
1526 /* Apply the new configuration 1282 /* Apply the new configuration
1527 * RXON unassoc clears the station table in uCode so restoration of 1283 * RXON unassoc clears the station table in uCode so restoration of
1528 * stations is needed after it (the RXON command) completes 1284 * stations is needed after it (the RXON command) completes
1529 */ 1285 */
1530 if (!new_assoc) { 1286 if (!new_assoc) {
1531 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, 1287 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1532 sizeof(struct iwl_rxon_cmd), &ctx->staging); 1288 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1533 if (ret) { 1289 if (ret) {
1534 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 1290 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1535 return ret; 1291 return ret;
1536 } 1292 }
1537 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n"); 1293 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
1538 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1294 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1539 iwl_clear_ucode_stations(priv, ctx); 1295 iwl_legacy_clear_ucode_stations(priv, ctx);
1540 iwl_restore_stations(priv, ctx); 1296 iwl_legacy_restore_stations(priv, ctx);
1541 ret = iwl_restore_default_wep_keys(priv, ctx); 1297 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1542 if (ret) { 1298 if (ret) {
1543 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); 1299 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1544 return ret; 1300 return ret;
@@ -1549,21 +1305,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1549 /* Apply the new configuration 1305 /* Apply the new configuration
1550 * RXON assoc doesn't clear the station table in uCode, 1306 * RXON assoc doesn't clear the station table in uCode,
1551 */ 1307 */
1552 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, 1308 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1553 sizeof(struct iwl_rxon_cmd), &ctx->staging); 1309 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1554 if (ret) { 1310 if (ret) {
1555 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 1311 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1556 return ret; 1312 return ret;
1557 } 1313 }
1558 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1314 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1559 } 1315 }
1560 iwl_print_rx_config_cmd(priv, ctx); 1316 iwl_legacy_print_rx_config_cmd(priv, ctx);
1561 1317
1562 iwl_init_sensitivity(priv); 1318 iwl4965_init_sensitivity(priv);
1563 1319
1564 /* If we issue a new RXON command which required a tune then we must 1320 /* If we issue a new RXON command which required a tune then we must
1565 * send a new TXPOWER command or we won't be able to Tx any frames */ 1321 * send a new TXPOWER command or we won't be able to Tx any frames */
1566 ret = iwl_set_tx_power(priv, priv->tx_power_next, true); 1322 ret = iwl_legacy_set_tx_power(priv, priv->tx_power_user_lmt, true);
1567 if (ret) { 1323 if (ret) {
1568 IWL_ERR(priv, "Error sending TX power (%d)\n", ret); 1324 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1569 return ret; 1325 return ret;
@@ -1590,7 +1346,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1590 struct ieee80211_vif *vif = ctx->vif; 1346 struct ieee80211_vif *vif = ctx->vif;
1591 band = priv->band == IEEE80211_BAND_2GHZ; 1347 band = priv->band == IEEE80211_BAND_2GHZ;
1592 1348
1593 is_ht40 = is_ht40_channel(ctx->staging.flags); 1349 is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
1594 1350
1595 if (is_ht40 && 1351 if (is_ht40 &&
1596 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) 1352 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
@@ -1621,19 +1377,19 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1621 else { 1377 else {
1622 switch_time_in_usec = 1378 switch_time_in_usec =
1623 vif->bss_conf.beacon_int * switch_count * TIME_UNIT; 1379 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1624 ucode_switch_time = iwl_usecs_to_beacons(priv, 1380 ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
1625 switch_time_in_usec, 1381 switch_time_in_usec,
1626 beacon_interval); 1382 beacon_interval);
1627 cmd.switch_time = iwl_add_beacon_time(priv, 1383 cmd.switch_time = iwl_legacy_add_beacon_time(priv,
1628 priv->ucode_beacon_time, 1384 priv->ucode_beacon_time,
1629 ucode_switch_time, 1385 ucode_switch_time,
1630 beacon_interval); 1386 beacon_interval);
1631 } 1387 }
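/*
 * Illustration (assumes TIME_UNIT is 1024 usec per time unit, as
 * elsewhere in iwlwifi): with beacon_int = 100 TU and switch_count = 10,
 * switch_time_in_usec = 100 * 10 * 1024 = 1024000 usec, which
 * iwl_legacy_usecs_to_beacons() folds back into the uCode beacon-time
 * format before iwl_legacy_add_beacon_time() offsets it from the last
 * reported ucode_beacon_time.
 */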
1632 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 1388 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
1633 cmd.switch_time); 1389 cmd.switch_time);
1634 ch_info = iwl_get_channel_info(priv, priv->band, ch); 1390 ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
1635 if (ch_info) 1391 if (ch_info)
1636 cmd.expect_beacon = is_channel_radar(ch_info); 1392 cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
1637 else { 1393 else {
1638 IWL_ERR(priv, "invalid channel switch from %u to %u\n", 1394 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1639 ctx->active.channel, ch); 1395 ctx->active.channel, ch);
@@ -1650,7 +1406,8 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1650 priv->switch_rxon.channel = cmd.channel; 1406 priv->switch_rxon.channel = cmd.channel;
1651 priv->switch_rxon.switch_in_progress = true; 1407 priv->switch_rxon.switch_in_progress = true;
1652 1408
1653 return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1409 return iwl_legacy_send_cmd_pdu(priv,
1410 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1654} 1411}
1655 1412
1656/** 1413/**
@@ -1692,7 +1449,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1692 u32 R4; 1449 u32 R4;
1693 1450
1694 if (test_bit(STATUS_TEMPERATURE, &priv->status) && 1451 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1695 (priv->_agn.statistics.flag & 1452 (priv->_4965.statistics.flag &
1696 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { 1453 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
1697 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); 1454 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
1698 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); 1455 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
@@ -1717,7 +1474,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1717 if (!test_bit(STATUS_TEMPERATURE, &priv->status)) 1474 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1718 vt = sign_extend32(R4, 23); 1475 vt = sign_extend32(R4, 23);
1719 else 1476 else
1720 vt = sign_extend32(le32_to_cpu(priv->_agn.statistics. 1477 vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
1721 general.common.temperature), 23); 1478 general.common.temperature), 23);
1722 1479
1723 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); 1480 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
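/*
 * Note (hedged): R4 and the statistics temperature above are treated as
 * 24-bit signed values, so sign_extend32(value, 23) uses bit 23 as the
 * sign bit; sign_extend32(0x00ffffff, 23) == -1, while small positive
 * readings pass through unchanged.
 */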
@@ -1802,7 +1559,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
1802 } 1559 }
1803 1560
1804 priv->temperature = temp; 1561 priv->temperature = temp;
1805 iwl_tt_handler(priv);
1806 set_bit(STATUS_TEMPERATURE, &priv->status); 1562 set_bit(STATUS_TEMPERATURE, &priv->status);
1807 1563
1808 if (!priv->disable_tx_power_cal && 1564 if (!priv->disable_tx_power_cal &&
@@ -1811,152 +1567,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
1811 queue_work(priv->workqueue, &priv->txpower_work); 1567 queue_work(priv->workqueue, &priv->txpower_work);
1812} 1568}
1813 1569
1814/**
1815 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
1816 */
1817static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
1818 u16 txq_id)
1819{
1820 /* Simply stop the queue, but don't change any configuration;
1821 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
1822 iwl_write_prph(priv,
1823 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
1824 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1825 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1826}
1827
1828/**
1829 * txq_id must be at least IWL49_FIRST_AMPDU_QUEUE
1830 * priv->lock must be held by the caller
1831 */
1832static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1833 u16 ssn_idx, u8 tx_fifo)
1834{
1835 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1836 (IWL49_FIRST_AMPDU_QUEUE +
1837 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
1838 IWL_WARN(priv,
1839 "queue number out of range: %d, must be %d to %d\n",
1840 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1841 IWL49_FIRST_AMPDU_QUEUE +
1842 priv->cfg->base_params->num_of_ampdu_queues - 1);
1843 return -EINVAL;
1844 }
1845
1846 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1847
1848 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
1849
1850 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1851 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
1852 	/* assumes that ssn_idx is valid (!= 0xFFF) */
1853 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1854
1855 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
1856 iwl_txq_ctx_deactivate(priv, txq_id);
1857 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
1858
1859 return 0;
1860}
1861
1862/**
1863 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
1864 */
1865static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
1866 u16 txq_id)
1867{
1868 u32 tbl_dw_addr;
1869 u32 tbl_dw;
1870 u16 scd_q2ratid;
1871
1872 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1873
1874 tbl_dw_addr = priv->scd_base_addr +
1875 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
1876
1877 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
1878
1879 if (txq_id & 0x1)
1880 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1881 else
1882 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1883
1884 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
1885
1886 return 0;
1887}
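/*
 * Illustration (not driver code): the translate table packs one 16-bit
 * RA/TID tag per queue, two queues per 32-bit word; even txq_ids use the
 * low half-word and odd txq_ids the high one, which is why the
 * read-modify-write above preserves the other half:
 *
 *	even: tbl_dw = scd_q2ratid         | (tbl_dw & 0xFFFF0000);
 *	odd:  tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
 */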
1888
1889
1890/**
1891 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
1892 *
1893 * NOTE: txq_id must be at least IWL49_FIRST_AMPDU_QUEUE,
1894 * i.e. it must be one of the higher queues used for aggregation
1895 */
1896static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1897 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
1898{
1899 unsigned long flags;
1900 u16 ra_tid;
1901 int ret;
1902
1903 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1904 (IWL49_FIRST_AMPDU_QUEUE +
1905 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
1906 IWL_WARN(priv,
1907 "queue number out of range: %d, must be %d to %d\n",
1908 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1909 IWL49_FIRST_AMPDU_QUEUE +
1910 priv->cfg->base_params->num_of_ampdu_queues - 1);
1911 return -EINVAL;
1912 }
1913
1914 ra_tid = BUILD_RAxTID(sta_id, tid);
1915
1916 /* Modify device's station table to Tx this TID */
1917 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
1918 if (ret)
1919 return ret;
1920
1921 spin_lock_irqsave(&priv->lock, flags);
1922
1923 /* Stop this Tx queue before configuring it */
1924 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1925
1926 /* Map receiver-address / traffic-ID to this queue */
1927 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
1928
1929 /* Set this queue as a chain-building queue */
1930 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
1931
1932 /* Place first TFD at index corresponding to start sequence number.
1933 * Assumes that ssn_idx is valid (!= 0xFFF) */
1934 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1935 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
1936 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1937
1938 /* Set up Tx window size and frame limit for this queue */
1939 iwl_write_targ_mem(priv,
1940 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
1941 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1942 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1943
1944 iwl_write_targ_mem(priv, priv->scd_base_addr +
1945 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1946 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
1947 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1948
1949 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
1950
1951 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
1952 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
1953
1954 spin_unlock_irqrestore(&priv->lock, flags);
1955
1956 return 0;
1957}
1958
1959
1960static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) 1570static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1961{ 1571{
1962 switch (cmd_id) { 1572 switch (cmd_id) {
@@ -1967,7 +1577,8 @@ static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1967 } 1577 }
1968} 1578}
1969 1579
1970static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) 1580static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
1581 u8 *data)
1971{ 1582{
1972 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data; 1583 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1973 addsta->mode = cmd->mode; 1584 addsta->mode = cmd->mode;
@@ -2020,16 +1631,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2020 status = le16_to_cpu(frame_status[0].status); 1631 status = le16_to_cpu(frame_status[0].status);
2021 idx = start_idx; 1632 idx = start_idx;
2022 1633
2023 /* FIXME: code repetition */
2024 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", 1634 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
2025 agg->frame_count, agg->start_idx, idx); 1635 agg->frame_count, agg->start_idx, idx);
2026 1636
2027 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); 1637 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
2028 info->status.rates[0].count = tx_resp->failure_frame + 1; 1638 info->status.rates[0].count = tx_resp->failure_frame + 1;
2029 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 1639 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
2030 info->flags |= iwl_tx_status_to_mac80211(status); 1640 info->flags |= iwl4965_tx_status_to_mac80211(status);
2031 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info); 1641 iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
2032 /* FIXME: code repetition end */
2033 1642
2034 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", 1643 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
2035 status & 0xff, tx_resp->failure_frame); 1644 status & 0xff, tx_resp->failure_frame);
@@ -2056,7 +1665,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2056 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", 1665 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
2057 agg->frame_count, txq_id, idx); 1666 agg->frame_count, txq_id, idx);
2058 1667
2059 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 1668 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
2060 if (!hdr) { 1669 if (!hdr) {
2061 IWL_ERR(priv, 1670 IWL_ERR(priv,
2062 "BUG_ON idx doesn't point to valid skb" 1671 "BUG_ON idx doesn't point to valid skb"
@@ -2107,15 +1716,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2107 return 0; 1716 return 0;
2108} 1717}
2109 1718
2110static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr) 1719static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
2111{ 1720{
2112 int i; 1721 int i;
2113 int start = 0; 1722 int start = 0;
2114 int ret = IWL_INVALID_STATION; 1723 int ret = IWL_INVALID_STATION;
2115 unsigned long flags; 1724 unsigned long flags;
2116 1725
2117 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) || 1726 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
2118 (priv->iw_mode == NL80211_IFTYPE_AP))
2119 start = IWL_STA_ID; 1727 start = IWL_STA_ID;
2120 1728
2121 if (is_broadcast_ether_addr(addr)) 1729 if (is_broadcast_ether_addr(addr))
@@ -2151,13 +1759,13 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
2151 return ret; 1759 return ret;
2152} 1760}
2153 1761
2154static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) 1762static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2155{ 1763{
2156 if (priv->iw_mode == NL80211_IFTYPE_STATION) { 1764 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
2157 return IWL_AP_ID; 1765 return IWL_AP_ID;
2158 } else { 1766 } else {
2159 u8 *da = ieee80211_get_DA(hdr); 1767 u8 *da = ieee80211_get_DA(hdr);
2160 return iwl_find_station(priv, da); 1768 return iwl4965_find_station(priv, da);
2161 } 1769 }
2162} 1770}
2163 1771
@@ -2182,7 +1790,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2182 u8 *qc = NULL; 1790 u8 *qc = NULL;
2183 unsigned long flags; 1791 unsigned long flags;
2184 1792
2185 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 1793 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
2186 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " 1794 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
2187 "is out of range [0-%d] %d %d\n", txq_id, 1795 "is out of range [0-%d] %d %d\n", txq_id,
2188 index, txq->q.n_bd, txq->q.write_ptr, 1796 index, txq->q.n_bd, txq->q.write_ptr,
@@ -2194,13 +1802,13 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2194 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 1802 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
2195 memset(&info->status, 0, sizeof(info->status)); 1803 memset(&info->status, 0, sizeof(info->status));
2196 1804
2197 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); 1805 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
2198 if (ieee80211_is_data_qos(hdr->frame_control)) { 1806 if (ieee80211_is_data_qos(hdr->frame_control)) {
2199 qc = ieee80211_get_qos_ctl(hdr); 1807 qc = ieee80211_get_qos_ctl(hdr);
2200 tid = qc[0] & 0xf; 1808 tid = qc[0] & 0xf;
2201 } 1809 }
2202 1810
2203 sta_id = iwl_get_ra_sta_id(priv, hdr); 1811 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
2204 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { 1812 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
2205 IWL_ERR(priv, "Station not known\n"); 1813 IWL_ERR(priv, "Station not known\n");
2206 return; 1814 return;
@@ -2217,51 +1825,52 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2217 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); 1825 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
2218 1826
2219 /* check if BAR is needed */ 1827 /* check if BAR is needed */
2220 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) 1828 if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
2221 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1829 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
2222 1830
2223 if (txq->q.read_ptr != (scd_ssn & 0xff)) { 1831 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2224 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 1832 index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
1833 txq->q.n_bd);
2225 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 1834 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2226 "%d index %d\n", scd_ssn , index); 1835 "%d index %d\n", scd_ssn , index);
2227 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 1836 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
2228 if (qc) 1837 if (qc)
2229 iwl_free_tfds_in_queue(priv, sta_id, 1838 iwl4965_free_tfds_in_queue(priv, sta_id,
2230 tid, freed); 1839 tid, freed);
2231 1840
2232 if (priv->mac80211_registered && 1841 if (priv->mac80211_registered &&
2233 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 1842 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
2234 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) 1843 && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
2235 iwl_wake_queue(priv, txq); 1844 iwl_legacy_wake_queue(priv, txq);
2236 } 1845 }
2237 } else { 1846 } else {
2238 info->status.rates[0].count = tx_resp->failure_frame + 1; 1847 info->status.rates[0].count = tx_resp->failure_frame + 1;
2239 info->flags |= iwl_tx_status_to_mac80211(status); 1848 info->flags |= iwl4965_tx_status_to_mac80211(status);
2240 iwlagn_hwrate_to_tx_control(priv, 1849 iwl4965_hwrate_to_tx_control(priv,
2241 le32_to_cpu(tx_resp->rate_n_flags), 1850 le32_to_cpu(tx_resp->rate_n_flags),
2242 info); 1851 info);
2243 1852
2244 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " 1853 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
2245 "rate_n_flags 0x%x retries %d\n", 1854 "rate_n_flags 0x%x retries %d\n",
2246 txq_id, 1855 txq_id,
2247 iwl_get_tx_fail_reason(status), status, 1856 iwl4965_get_tx_fail_reason(status), status,
2248 le32_to_cpu(tx_resp->rate_n_flags), 1857 le32_to_cpu(tx_resp->rate_n_flags),
2249 tx_resp->failure_frame); 1858 tx_resp->failure_frame);
2250 1859
2251 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 1860 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
2252 if (qc && likely(sta_id != IWL_INVALID_STATION)) 1861 if (qc && likely(sta_id != IWL_INVALID_STATION))
2253 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 1862 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
2254 else if (sta_id == IWL_INVALID_STATION) 1863 else if (sta_id == IWL_INVALID_STATION)
2255 IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); 1864 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
2256 1865
2257 if (priv->mac80211_registered && 1866 if (priv->mac80211_registered &&
2258 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 1867 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
2259 iwl_wake_queue(priv, txq); 1868 iwl_legacy_wake_queue(priv, txq);
2260 } 1869 }
2261 if (qc && likely(sta_id != IWL_INVALID_STATION)) 1870 if (qc && likely(sta_id != IWL_INVALID_STATION))
2262 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 1871 iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
2263 1872
2264 iwl_check_abort_status(priv, tx_resp->frame_count, status); 1873 iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
2265 1874
2266 spin_unlock_irqrestore(&priv->sta_lock, flags); 1875 spin_unlock_irqrestore(&priv->sta_lock, flags);
2267} 1876}
@@ -2271,8 +1880,8 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
2271{ 1880{
2272 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1881 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2273 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw; 1882 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
2274#ifdef CONFIG_IWLWIFI_DEBUG 1883 u8 rate __maybe_unused =
2275 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 1884 iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
2276 1885
2277 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " 1886 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
2278 "tsf:0x%.8x%.8x rate:%d\n", 1887 "tsf:0x%.8x%.8x rate:%d\n",
@@ -2281,79 +1890,24 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
2281 le32_to_cpu(beacon->ibss_mgr_status), 1890 le32_to_cpu(beacon->ibss_mgr_status),
2282 le32_to_cpu(beacon->high_tsf), 1891 le32_to_cpu(beacon->high_tsf),
2283 le32_to_cpu(beacon->low_tsf), rate); 1892 le32_to_cpu(beacon->low_tsf), rate);
2284#endif
2285 1893
2286 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 1894 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
2287
2288 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
2289 queue_work(priv->workqueue, &priv->beacon_update);
2290}
2291
2292static int iwl4965_calc_rssi(struct iwl_priv *priv,
2293 struct iwl_rx_phy_res *rx_resp)
2294{
2295 /* data from PHY/DSP regarding signal strength, etc.,
2296 * contents are always there, not configurable by host. */
2297 struct iwl4965_rx_non_cfg_phy *ncphy =
2298 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
2299 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
2300 >> IWL49_AGC_DB_POS;
2301
2302 u32 valid_antennae =
2303 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
2304 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
2305 u8 max_rssi = 0;
2306 u32 i;
2307
2308 /* Find max rssi among 3 possible receivers.
2309 * These values are measured by the digital signal processor (DSP).
2310 * They should stay fairly constant even as the signal strength varies,
2311 * if the radio's automatic gain control (AGC) is working right.
2312 * AGC value (see below) will provide the "interesting" info. */
2313 for (i = 0; i < 3; i++)
2314 if (valid_antennae & (1 << i))
2315 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2316
2317 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2318 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2319 max_rssi, agc);
2320
2321 /* dBm = max_rssi dB - agc dB - constant.
2322 * Higher AGC (higher radio gain) means lower signal. */
2323 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
2324} 1895}
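/*
 * Illustration (assumes IWLAGN_RSSI_OFFSET is the usual 44 dB constant):
 * with max_rssi = 60 and agc = 50 the reported signal would be
 * 60 - 50 - 44 = -34 dBm; a larger AGC value for the same DSP rssi
 * reading means a weaker received signal.
 */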
2325 1896
2326
2327/* Set up 4965-specific Rx frame reply handlers */ 1897/* Set up 4965-specific Rx frame reply handlers */
2328static void iwl4965_rx_handler_setup(struct iwl_priv *priv) 1898static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
2329{ 1899{
2330 /* Legacy Rx frames */ 1900 /* Legacy Rx frames */
2331 priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx; 1901 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
2332 /* Tx response */ 1902 /* Tx response */
2333 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; 1903 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
2334 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; 1904 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
2335
2336 /* set up notification wait support */
2337 spin_lock_init(&priv->_agn.notif_wait_lock);
2338 INIT_LIST_HEAD(&priv->_agn.notif_waits);
2339 init_waitqueue_head(&priv->_agn.notif_waitq);
2340}
2341
2342static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
2343{
2344 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
2345}
2346
2347static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
2348{
2349 cancel_work_sync(&priv->txpower_work);
2350} 1905}
2351 1906
2352static struct iwl_hcmd_ops iwl4965_hcmd = { 1907static struct iwl_hcmd_ops iwl4965_hcmd = {
2353 .rxon_assoc = iwl4965_send_rxon_assoc, 1908 .rxon_assoc = iwl4965_send_rxon_assoc,
2354 .commit_rxon = iwl4965_commit_rxon, 1909 .commit_rxon = iwl4965_commit_rxon,
2355 .set_rxon_chain = iwlagn_set_rxon_chain, 1910 .set_rxon_chain = iwl4965_set_rxon_chain,
2356 .send_bt_config = iwl_send_bt_config,
2357}; 1911};
2358 1912
2359static void iwl4965_post_scan(struct iwl_priv *priv) 1913static void iwl4965_post_scan(struct iwl_priv *priv)
@@ -2365,7 +1919,7 @@ static void iwl4965_post_scan(struct iwl_priv *priv)
2365 * performing the scan, fire one off if needed 1919 * performing the scan, fire one off if needed
2366 */ 1920 */
2367 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) 1921 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2368 iwlcore_commit_rxon(priv, ctx); 1922 iwl_legacy_commit_rxon(priv, ctx);
2369} 1923}
2370 1924
2371static void iwl4965_post_associate(struct iwl_priv *priv) 1925static void iwl4965_post_associate(struct iwl_priv *priv)
@@ -2378,29 +1932,24 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2378 if (!vif || !priv->is_open) 1932 if (!vif || !priv->is_open)
2379 return; 1933 return;
2380 1934
2381 if (vif->type == NL80211_IFTYPE_AP) {
2382 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
2383 return;
2384 }
2385
2386 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1935 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2387 return; 1936 return;
2388 1937
2389 iwl_scan_cancel_timeout(priv, 200); 1938 iwl_legacy_scan_cancel_timeout(priv, 200);
2390 1939
2391 conf = ieee80211_get_hw_conf(priv->hw); 1940 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
2392 1941
2393 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1942 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2394 iwlcore_commit_rxon(priv, ctx); 1943 iwl_legacy_commit_rxon(priv, ctx);
2395 1944
2396 ret = iwl_send_rxon_timing(priv, ctx); 1945 ret = iwl_legacy_send_rxon_timing(priv, ctx);
2397 if (ret) 1946 if (ret)
2398 IWL_WARN(priv, "RXON timing - " 1947 IWL_WARN(priv, "RXON timing - "
2399 "Attempting to continue.\n"); 1948 "Attempting to continue.\n");
2400 1949
2401 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 1950 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2402 1951
2403 iwl_set_rxon_ht(priv, &priv->current_ht_config); 1952 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
2404 1953
2405 if (priv->cfg->ops->hcmd->set_rxon_chain) 1954 if (priv->cfg->ops->hcmd->set_rxon_chain)
2406 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 1955 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
@@ -2422,7 +1971,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2422 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 1971 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2423 } 1972 }
2424 1973
2425 iwlcore_commit_rxon(priv, ctx); 1974 iwl_legacy_commit_rxon(priv, ctx);
2426 1975
2427 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 1976 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2428 vif->bss_conf.aid, ctx->active.bssid_addr); 1977 vif->bss_conf.aid, ctx->active.bssid_addr);
@@ -2431,7 +1980,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2431 case NL80211_IFTYPE_STATION: 1980 case NL80211_IFTYPE_STATION:
2432 break; 1981 break;
2433 case NL80211_IFTYPE_ADHOC: 1982 case NL80211_IFTYPE_ADHOC:
2434 iwlagn_send_beacon_cmd(priv); 1983 iwl4965_send_beacon_cmd(priv);
2435 break; 1984 break;
2436 default: 1985 default:
2437 IWL_ERR(priv, "%s Should not be called in %d mode\n", 1986 IWL_ERR(priv, "%s Should not be called in %d mode\n",
@@ -2443,10 +1992,10 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2443 * If chain noise has already been run, then we need to enable 1992 * If chain noise has already been run, then we need to enable
2444 * power management here */ 1993 * power management here */
2445 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) 1994 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
2446 iwl_power_update_mode(priv, false); 1995 iwl_legacy_power_update_mode(priv, false);
2447 1996
2448 /* Enable Rx differential gain and sensitivity calibrations */ 1997 /* Enable Rx differential gain and sensitivity calibrations */
2449 iwl_chain_noise_reset(priv); 1998 iwl4965_chain_noise_reset(priv);
2450 priv->start_calib = 1; 1999 priv->start_calib = 1;
2451} 2000}
2452 2001
@@ -2462,14 +2011,14 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2462 return; 2011 return;
2463 2012
2464 /* The following should be done only at AP bring up */ 2013 /* The following should be done only at AP bring up */
2465 if (!iwl_is_associated_ctx(ctx)) { 2014 if (!iwl_legacy_is_associated_ctx(ctx)) {
2466 2015
2467 /* RXON - unassoc (to set timing command) */ 2016 /* RXON - unassoc (to set timing command) */
2468 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2017 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2469 iwlcore_commit_rxon(priv, ctx); 2018 iwl_legacy_commit_rxon(priv, ctx);
2470 2019
2471 /* RXON Timing */ 2020 /* RXON Timing */
2472 ret = iwl_send_rxon_timing(priv, ctx); 2021 ret = iwl_legacy_send_rxon_timing(priv, ctx);
2473 if (ret) 2022 if (ret)
2474 IWL_WARN(priv, "RXON timing failed - " 2023 IWL_WARN(priv, "RXON timing failed - "
2475 "Attempting to continue.\n"); 2024 "Attempting to continue.\n");
@@ -2477,7 +2026,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2477 /* AP has all antennas */ 2026 /* AP has all antennas */
2478 priv->chain_noise_data.active_chains = 2027 priv->chain_noise_data.active_chains =
2479 priv->hw_params.valid_rx_ant; 2028 priv->hw_params.valid_rx_ant;
2480 iwl_set_rxon_ht(priv, &priv->current_ht_config); 2029 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
2481 if (priv->cfg->ops->hcmd->set_rxon_chain) 2030 if (priv->cfg->ops->hcmd->set_rxon_chain)
2482 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 2031 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2483 2032
@@ -2499,51 +2048,37 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2499 ~RXON_FLG_SHORT_SLOT_MSK; 2048 ~RXON_FLG_SHORT_SLOT_MSK;
2500 } 2049 }
2501 /* need to send beacon cmd before committing assoc RXON! */ 2050 /* need to send beacon cmd before committing assoc RXON! */
2502 iwlagn_send_beacon_cmd(priv); 2051 iwl4965_send_beacon_cmd(priv);
2503 /* restore RXON assoc */ 2052 /* restore RXON assoc */
2504 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 2053 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2505 iwlcore_commit_rxon(priv, ctx); 2054 iwl_legacy_commit_rxon(priv, ctx);
2506 } 2055 }
2507 iwlagn_send_beacon_cmd(priv); 2056 iwl4965_send_beacon_cmd(priv);
2508
2509 /* FIXME - we need to add code here to detect a totally new
2510 * configuration, reset the AP, unassoc, rxon timing, assoc,
2511 * clear sta table, add BCAST sta... */
2512} 2057}
2513 2058
2514static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 2059static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2515 .get_hcmd_size = iwl4965_get_hcmd_size, 2060 .get_hcmd_size = iwl4965_get_hcmd_size,
2516 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2061 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2517 .chain_noise_reset = iwl4965_chain_noise_reset, 2062 .request_scan = iwl4965_request_scan,
2518 .gain_computation = iwl4965_gain_computation,
2519 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2520 .calc_rssi = iwl4965_calc_rssi,
2521 .request_scan = iwlagn_request_scan,
2522 .post_scan = iwl4965_post_scan, 2063 .post_scan = iwl4965_post_scan,
2523}; 2064};
2524 2065
2525static struct iwl_lib_ops iwl4965_lib = { 2066static struct iwl_lib_ops iwl4965_lib = {
2526 .set_hw_params = iwl4965_hw_set_hw_params, 2067 .set_hw_params = iwl4965_hw_set_hw_params,
2527 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, 2068 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
2528 .txq_set_sched = iwl4965_txq_set_sched, 2069 .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
2529 .txq_agg_enable = iwl4965_txq_agg_enable, 2070 .txq_free_tfd = iwl4965_hw_txq_free_tfd,
2530 .txq_agg_disable = iwl4965_txq_agg_disable, 2071 .txq_init = iwl4965_hw_tx_queue_init,
2531 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
2532 .txq_free_tfd = iwl_hw_txq_free_tfd,
2533 .txq_init = iwl_hw_tx_queue_init,
2534 .rx_handler_setup = iwl4965_rx_handler_setup, 2072 .rx_handler_setup = iwl4965_rx_handler_setup,
2535 .setup_deferred_work = iwl4965_setup_deferred_work,
2536 .cancel_deferred_work = iwl4965_cancel_deferred_work,
2537 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, 2073 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
2538 .alive_notify = iwl4965_alive_notify,
2539 .init_alive_start = iwl4965_init_alive_start, 2074 .init_alive_start = iwl4965_init_alive_start,
2540 .load_ucode = iwl4965_load_bsm, 2075 .load_ucode = iwl4965_load_bsm,
2541 .dump_nic_event_log = iwl_dump_nic_event_log, 2076 .dump_nic_event_log = iwl4965_dump_nic_event_log,
2542 .dump_nic_error_log = iwl_dump_nic_error_log, 2077 .dump_nic_error_log = iwl4965_dump_nic_error_log,
2543 .dump_fh = iwl_dump_fh, 2078 .dump_fh = iwl4965_dump_fh,
2544 .set_channel_switch = iwl4965_hw_channel_switch, 2079 .set_channel_switch = iwl4965_hw_channel_switch,
2545 .apm_ops = { 2080 .apm_ops = {
2546 .init = iwl_apm_init, 2081 .init = iwl_legacy_apm_init,
2547 .config = iwl4965_nic_config, 2082 .config = iwl4965_nic_config,
2548 }, 2083 },
2549 .eeprom_ops = { 2084 .eeprom_ops = {
@@ -2556,64 +2091,56 @@ static struct iwl_lib_ops iwl4965_lib = {
2556 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, 2091 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
2557 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS 2092 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
2558 }, 2093 },
2559 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 2094 .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
2560 .release_semaphore = iwlcore_eeprom_release_semaphore, 2095 .release_semaphore = iwl4965_eeprom_release_semaphore,
2561 .calib_version = iwl4965_eeprom_calib_version,
2562 .query_addr = iwlcore_eeprom_query_addr,
2563 }, 2096 },
2564 .send_tx_power = iwl4965_send_tx_power, 2097 .send_tx_power = iwl4965_send_tx_power,
2565 .update_chain_flags = iwl_update_chain_flags, 2098 .update_chain_flags = iwl4965_update_chain_flags,
2566 .isr_ops = {
2567 .isr = iwl_isr_legacy,
2568 },
2569 .temp_ops = { 2099 .temp_ops = {
2570 .temperature = iwl4965_temperature_calib, 2100 .temperature = iwl4965_temperature_calib,
2571 }, 2101 },
2572 .debugfs_ops = { 2102 .debugfs_ops = {
2573 .rx_stats_read = iwl_ucode_rx_stats_read, 2103 .rx_stats_read = iwl4965_ucode_rx_stats_read,
2574 .tx_stats_read = iwl_ucode_tx_stats_read, 2104 .tx_stats_read = iwl4965_ucode_tx_stats_read,
2575 .general_stats_read = iwl_ucode_general_stats_read, 2105 .general_stats_read = iwl4965_ucode_general_stats_read,
2576 .bt_stats_read = iwl_ucode_bt_stats_read,
2577 .reply_tx_error = iwl_reply_tx_error_read,
2578 }, 2106 },
2579 .check_plcp_health = iwl_good_plcp_health, 2107 .check_plcp_health = iwl4965_good_plcp_health,
2580}; 2108};
2581 2109
2582static const struct iwl_legacy_ops iwl4965_legacy_ops = { 2110static const struct iwl_legacy_ops iwl4965_legacy_ops = {
2583 .post_associate = iwl4965_post_associate, 2111 .post_associate = iwl4965_post_associate,
2584 .config_ap = iwl4965_config_ap, 2112 .config_ap = iwl4965_config_ap,
2585 .manage_ibss_station = iwlagn_manage_ibss_station, 2113 .manage_ibss_station = iwl4965_manage_ibss_station,
2586 .update_bcast_stations = iwl_update_bcast_stations, 2114 .update_bcast_stations = iwl4965_update_bcast_stations,
2587}; 2115};
2588 2116
2589struct ieee80211_ops iwl4965_hw_ops = { 2117struct ieee80211_ops iwl4965_hw_ops = {
2590 .tx = iwlagn_mac_tx, 2118 .tx = iwl4965_mac_tx,
2591 .start = iwlagn_mac_start, 2119 .start = iwl4965_mac_start,
2592 .stop = iwlagn_mac_stop, 2120 .stop = iwl4965_mac_stop,
2593 .add_interface = iwl_mac_add_interface, 2121 .add_interface = iwl_legacy_mac_add_interface,
2594 .remove_interface = iwl_mac_remove_interface, 2122 .remove_interface = iwl_legacy_mac_remove_interface,
2595 .change_interface = iwl_mac_change_interface, 2123 .change_interface = iwl_legacy_mac_change_interface,
2596 .config = iwl_legacy_mac_config, 2124 .config = iwl_legacy_mac_config,
2597 .configure_filter = iwlagn_configure_filter, 2125 .configure_filter = iwl4965_configure_filter,
2598 .set_key = iwlagn_mac_set_key, 2126 .set_key = iwl4965_mac_set_key,
2599 .update_tkip_key = iwlagn_mac_update_tkip_key, 2127 .update_tkip_key = iwl4965_mac_update_tkip_key,
2600 .conf_tx = iwl_mac_conf_tx, 2128 .conf_tx = iwl_legacy_mac_conf_tx,
2601 .reset_tsf = iwl_legacy_mac_reset_tsf, 2129 .reset_tsf = iwl_legacy_mac_reset_tsf,
2602 .bss_info_changed = iwl_legacy_mac_bss_info_changed, 2130 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
2603 .ampdu_action = iwlagn_mac_ampdu_action, 2131 .ampdu_action = iwl4965_mac_ampdu_action,
2604 .hw_scan = iwl_mac_hw_scan, 2132 .hw_scan = iwl_legacy_mac_hw_scan,
2605 .sta_add = iwlagn_mac_sta_add, 2133 .sta_add = iwl4965_mac_sta_add,
2606 .sta_remove = iwl_mac_sta_remove, 2134 .sta_remove = iwl_legacy_mac_sta_remove,
2607 .channel_switch = iwlagn_mac_channel_switch, 2135 .channel_switch = iwl4965_mac_channel_switch,
2608 .flush = iwlagn_mac_flush, 2136 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
2609 .tx_last_beacon = iwl_mac_tx_last_beacon,
2610}; 2137};
2611 2138
2612static const struct iwl_ops iwl4965_ops = { 2139static const struct iwl_ops iwl4965_ops = {
2613 .lib = &iwl4965_lib, 2140 .lib = &iwl4965_lib,
2614 .hcmd = &iwl4965_hcmd, 2141 .hcmd = &iwl4965_hcmd,
2615 .utils = &iwl4965_hcmd_utils, 2142 .utils = &iwl4965_hcmd_utils,
2616 .led = &iwlagn_led_ops, 2143 .led = &iwl4965_led_ops,
2617 .legacy = &iwl4965_legacy_ops, 2144 .legacy = &iwl4965_legacy_ops,
2618 .ieee80211_ops = &iwl4965_hw_ops, 2145 .ieee80211_ops = &iwl4965_hw_ops,
2619}; 2146};
@@ -2625,22 +2152,18 @@ static struct iwl_base_params iwl4965_base_params = {
2625 .pll_cfg_val = 0, 2152 .pll_cfg_val = 0,
2626 .set_l0s = true, 2153 .set_l0s = true,
2627 .use_bsm = true, 2154 .use_bsm = true,
2628 .use_isr_legacy = true,
2629 .broken_powersave = true,
2630 .led_compensation = 61, 2155 .led_compensation = 61,
2631 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2156 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2632 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2157 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2633 .wd_timeout = IWL_DEF_WD_TIMEOUT, 2158 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2634 .temperature_kelvin = true, 2159 .temperature_kelvin = true,
2635 .max_event_log_size = 512, 2160 .max_event_log_size = 512,
2636 .tx_power_by_driver = true,
2637 .ucode_tracing = true, 2161 .ucode_tracing = true,
2638 .sensitivity_calib_by_driver = true, 2162 .sensitivity_calib_by_driver = true,
2639 .chain_noise_calib_by_driver = true, 2163 .chain_noise_calib_by_driver = true,
2640 .no_agg_framecnt_info = true,
2641}; 2164};
2642 2165
2643struct iwl_cfg iwl4965_agn_cfg = { 2166struct iwl_cfg iwl4965_cfg = {
2644 .name = "Intel(R) Wireless WiFi Link 4965AGN", 2167 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2645 .fw_name_pre = IWL4965_FW_PRE, 2168 .fw_name_pre = IWL4965_FW_PRE,
2646 .ucode_api_max = IWL4965_UCODE_API_MAX, 2169 .ucode_api_max = IWL4965_UCODE_API_MAX,
@@ -2651,7 +2174,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2651 .eeprom_ver = EEPROM_4965_EEPROM_VERSION, 2174 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2652 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, 2175 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2653 .ops = &iwl4965_ops, 2176 .ops = &iwl4965_ops,
2654 .mod_params = &iwlagn_mod_params, 2177 .mod_params = &iwl4965_mod_params,
2655 .base_params = &iwl4965_base_params, 2178 .base_params = &iwl4965_base_params,
2656 .led_mode = IWL_LED_BLINK, 2179 .led_mode = IWL_LED_BLINK,
2657 /* 2180 /*
@@ -2663,4 +2186,3 @@ struct iwl_cfg iwl4965_agn_cfg = {
2663 2186
2664/* Module firmware */ 2187/* Module firmware */
2665MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); 2188MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
2666
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
new file mode 100644
index 000000000000..79e206770f71
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.h
@@ -0,0 +1,282 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_4965_h__
64#define __iwl_4965_h__
65
66#include "iwl-dev.h"
67
68/* configuration for the _4965 devices */
69extern struct iwl_cfg iwl4965_cfg;
70
71extern struct iwl_mod_params iwl4965_mod_params;
72
73extern struct ieee80211_ops iwl4965_hw_ops;
74
75/* tx queue */
76void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
77 int sta_id, int tid, int freed);
78
79/* RXON */
80void iwl4965_set_rxon_chain(struct iwl_priv *priv,
81 struct iwl_rxon_context *ctx);
82
83/* uCode */
84int iwl4965_verify_ucode(struct iwl_priv *priv);
85
86/* lib */
87void iwl4965_check_abort_status(struct iwl_priv *priv,
88 u8 frame_count, u32 status);
89
90void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
91int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
92int iwl4965_hw_nic_init(struct iwl_priv *priv);
93int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
94
95/* rx */
96void iwl4965_rx_queue_restock(struct iwl_priv *priv);
97void iwl4965_rx_replenish(struct iwl_priv *priv);
98void iwl4965_rx_replenish_now(struct iwl_priv *priv);
99void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
100int iwl4965_rxq_stop(struct iwl_priv *priv);
101int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
102void iwl4965_rx_reply_rx(struct iwl_priv *priv,
103 struct iwl_rx_mem_buffer *rxb);
104void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
105 struct iwl_rx_mem_buffer *rxb);
106void iwl4965_rx_handle(struct iwl_priv *priv);
107
108/* tx */
109void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
110int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq,
112 dma_addr_t addr, u16 len, u8 reset, u8 pad);
113int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
114 struct iwl_tx_queue *txq);
115void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
116 struct ieee80211_tx_info *info);
117int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
118int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
119 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
120int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
121 struct ieee80211_sta *sta, u16 tid);
122int iwl4965_txq_check_empty(struct iwl_priv *priv,
123 int sta_id, u8 tid, int txq_id);
124void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
125 struct iwl_rx_mem_buffer *rxb);
126int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
127void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
128int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
129void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
130void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
131void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
132
133/*
134 * Acquire priv->lock before calling this function !
135 */
136void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
137/**
138 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
139 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
140 * @scd_retry: (1) Indicates queue will be used in aggregation mode
141 *
142 * NOTE: Acquire priv->lock before calling this function !
143 */
144void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
145 struct iwl_tx_queue *txq,
146 int tx_fifo_id, int scd_retry);
147
148static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
149{
150 status &= TX_STATUS_MSK;
151
152 switch (status) {
153 case TX_STATUS_SUCCESS:
154 case TX_STATUS_DIRECT_DONE:
155 return IEEE80211_TX_STAT_ACK;
156 case TX_STATUS_FAIL_DEST_PS:
157 return IEEE80211_TX_STAT_TX_FILTERED;
158 default:
159 return 0;
160 }
161}
162
163static inline bool iwl4965_is_tx_success(u32 status)
164{
165 status &= TX_STATUS_MSK;
166 return (status == TX_STATUS_SUCCESS) ||
167 (status == TX_STATUS_DIRECT_DONE);
168}
169
170u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
171
172/* rx */
173void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
174 struct iwl_rx_mem_buffer *rxb);
175bool iwl4965_good_plcp_health(struct iwl_priv *priv,
176 struct iwl_rx_packet *pkt);
177void iwl4965_rx_statistics(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb);
179void iwl4965_reply_statistics(struct iwl_priv *priv,
180 struct iwl_rx_mem_buffer *rxb);
181
182/* scan */
183int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
184
185/* station mgmt */
186int iwl4965_manage_ibss_station(struct iwl_priv *priv,
187 struct ieee80211_vif *vif, bool add);
188
189/* hcmd */
190int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
191
192#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
193const char *iwl4965_get_tx_fail_reason(u32 status);
194#else
195static inline const char *
196iwl4965_get_tx_fail_reason(u32 status) { return ""; }
197#endif
198
199/* station management */
200int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
201 struct iwl_rxon_context *ctx);
202int iwl4965_add_bssid_station(struct iwl_priv *priv,
203 struct iwl_rxon_context *ctx,
204 const u8 *addr, u8 *sta_id_r);
205int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
206 struct iwl_rxon_context *ctx,
207 struct ieee80211_key_conf *key);
208int iwl4965_set_default_wep_key(struct iwl_priv *priv,
209 struct iwl_rxon_context *ctx,
210 struct ieee80211_key_conf *key);
211int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
212 struct iwl_rxon_context *ctx);
213int iwl4965_set_dynamic_key(struct iwl_priv *priv,
214 struct iwl_rxon_context *ctx,
215 struct ieee80211_key_conf *key, u8 sta_id);
216int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
217 struct iwl_rxon_context *ctx,
218 struct ieee80211_key_conf *key, u8 sta_id);
219void iwl4965_update_tkip_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf,
222 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
223int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
224 int sta_id, int tid);
225int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
226 int tid, u16 ssn);
227int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
228 int tid);
229void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
230 int sta_id, int cnt);
231int iwl4965_update_bcast_stations(struct iwl_priv *priv);
232
233/* rate */
234static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
235{
236 return BIT(ant_idx) << RATE_MCS_ANT_POS;
237}
238
239static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
240{
241 return le32_to_cpu(rate_n_flags) & 0xFF;
242}
243
244static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
245{
246 return cpu_to_le32(flags|(u32)rate);
247}
248
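/*
 * Illustrative sketch (hypothetical helper and values, not taken from the
 * driver sources): how the three rate accessors above compose.  Builds a
 * rate_n_flags word for PLCP rate 0x0D (6 Mbps OFDM) on antenna index 1
 * (antenna B) and reads the rate back out.
 */
static inline void iwl4965_rate_helpers_example(void)
{
	__le32 rnf = iwl4965_hw_set_rate_n_flags(0x0D,
					iwl4965_ant_idx_to_flags(1));
	u8 rate = iwl4965_hw_get_rate(rnf);	/* yields 0x0D */

	(void)rate;
}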
249/* eeprom */
250void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
251int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
252void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
253int iwl4965_eeprom_check_version(struct iwl_priv *priv);
254
255/* mac80211 handlers (for 4965) */
256int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
257int iwl4965_mac_start(struct ieee80211_hw *hw);
258void iwl4965_mac_stop(struct ieee80211_hw *hw);
259void iwl4965_configure_filter(struct ieee80211_hw *hw,
260 unsigned int changed_flags,
261 unsigned int *total_flags,
262 u64 multicast);
263int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
264 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
265 struct ieee80211_key_conf *key);
266void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
267 struct ieee80211_vif *vif,
268 struct ieee80211_key_conf *keyconf,
269 struct ieee80211_sta *sta,
270 u32 iv32, u16 *phase1key);
271int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
272 struct ieee80211_vif *vif,
273 enum ieee80211_ampdu_mlme_action action,
274 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
275 u8 buf_size);
276int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
277 struct ieee80211_vif *vif,
278 struct ieee80211_sta *sta);
279void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
280 struct ieee80211_channel_switch *ch_switch);
281
282#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
new file mode 100644
index 000000000000..17a1d504348e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-commands.h
@@ -0,0 +1,3405 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_legacy_commands_h__
70#define __iwl_legacy_commands_h__
71
72struct iwl_priv;
73
74/* uCode version contains 4 values: Major/Minor/API/Serial */
75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
79
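/*
 * Illustrative sketch (hypothetical helper and version value, not part of
 * the driver): how the four macros above unpack a 32-bit uCode version.
 */
static inline void iwl_legacy_ucode_ver_example(u32 ver)
{
	/* e.g. ver == 0x04080A01 decodes as major 4, minor 8, API 10, serial 1 */
	u8 major  = IWL_UCODE_MAJOR(ver);
	u8 minor  = IWL_UCODE_MINOR(ver);
	u8 api    = IWL_UCODE_API(ver);
	u8 serial = IWL_UCODE_SERIAL(ver);

	(void)major; (void)minor; (void)api; (void)serial;
}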
80
81/* Tx rates */
82#define IWL_CCK_RATES 4
83#define IWL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
85
86enum {
87 REPLY_ALIVE = 0x1,
88 REPLY_ERROR = 0x2,
89
90 /* RXON and QOS commands */
91 REPLY_RXON = 0x10,
92 REPLY_RXON_ASSOC = 0x11,
93 REPLY_QOS_PARAM = 0x13,
94 REPLY_RXON_TIMING = 0x14,
95
96 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19,
99
100 /* Security */
101 REPLY_WEPKEY = 0x20,
102
103 /* RX, TX, LEDs */
104 REPLY_3945_RX = 0x1b, /* 3945 only */
105 REPLY_TX = 0x1c,
106 REPLY_RATE_SCALE = 0x47, /* 3945 only */
107 REPLY_LEDS_CMD = 0x48,
108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
109
110 /* 802.11h related */
111 REPLY_CHANNEL_SWITCH = 0x72,
112 CHANNEL_SWITCH_NOTIFICATION = 0x73,
113 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
114 SPECTRUM_MEASURE_NOTIFICATION = 0x75,
115
116 /* Power Management */
117 POWER_TABLE_CMD = 0x77,
118 PM_SLEEP_NOTIFICATION = 0x7A,
119 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
120
121 /* Scan commands and notifications */
122 REPLY_SCAN_CMD = 0x80,
123 REPLY_SCAN_ABORT_CMD = 0x81,
124 SCAN_START_NOTIFICATION = 0x82,
125 SCAN_RESULTS_NOTIFICATION = 0x83,
126 SCAN_COMPLETE_NOTIFICATION = 0x84,
127
128 /* IBSS/AP commands */
129 BEACON_NOTIFICATION = 0x90,
130 REPLY_TX_BEACON = 0x91,
131
132 /* Miscellaneous commands */
133 REPLY_TX_PWR_TABLE_CMD = 0x97,
134
135 /* Bluetooth device coexistence config command */
136 REPLY_BT_CONFIG = 0x9b,
137
138 /* Statistics */
139 REPLY_STATISTICS_CMD = 0x9c,
140 STATISTICS_NOTIFICATION = 0x9d,
141
142 /* RF-KILL commands and notifications */
143 CARD_STATE_NOTIFICATION = 0xa1,
144
145 /* Missed beacons notification */
146 MISSED_BEACONS_NOTIFICATION = 0xa2,
147
148 REPLY_CT_KILL_CONFIG_CMD = 0xa4,
149 SENSITIVITY_CMD = 0xa8,
150 REPLY_PHY_CALIBRATION_CMD = 0xb0,
151 REPLY_RX_PHY_CMD = 0xc0,
152 REPLY_RX_MPDU_CMD = 0xc1,
153 REPLY_RX = 0xc3,
154 REPLY_COMPRESSED_BA = 0xc5,
155
156 REPLY_MAX = 0xff
157};
158
159/******************************************************************************
160 * (0)
161 * Commonly used structures and definitions:
162 * Command header, rate_n_flags, txpower
163 *
164 *****************************************************************************/
165
166/* iwl_cmd_header flags value */
167#define IWL_CMD_FAILED_MSK 0x40
168
169#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
170#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
171#define SEQ_TO_INDEX(s) ((s) & 0xff)
172#define INDEX_TO_SEQ(i) ((i) & 0xff)
173#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
174#define SEQ_RX_FRAME cpu_to_le16(0x8000)
175
176/**
177 * struct iwl_cmd_header
178 *
179 * This header format appears in the beginning of each command sent from the
180 * driver, and each response/notification received from uCode.
181 */
182struct iwl_cmd_header {
183 u8 cmd; /* Command ID: REPLY_RXON, etc. */
184 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
185 /*
186 * The driver sets up the sequence number to values of its choosing.
187 * uCode does not use this value, but passes it back to the driver
188 * when sending the response to each driver-originated command, so
189 * the driver can match the response to the command. Since the values
190 * don't get used by uCode, the driver may set up an arbitrary format.
191 *
192 * There is one exception: uCode sets bit 15 when it originates
193 * the response/notification, i.e. when the response/notification
194 * is not a direct response to a command sent by the driver. For
195 * example, uCode issues REPLY_3945_RX when it sends a received frame
196 * to the driver; it is not a direct response to any driver command.
197 *
198 * The Linux driver uses the following format:
199 *
200 * 0:7 tfd index - position within TX queue
201 * 8:12 TX queue id
202 * 13 reserved
203 * 14 huge - driver sets this to indicate command is in the
204 * 'huge' storage at the end of the command buffers
205 * 15 unsolicited RX or uCode-originated notification
206 */
207 __le16 sequence;
208
209 /* command or response/notification data follows immediately */
210 u8 data[0];
211} __packed;
212
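/*
 * Illustrative sketch (hypothetical helper and values, not part of the
 * driver): how the SEQ_*/QUEUE_*/INDEX_* macros above compose and crack
 * the driver-defined sequence format described for iwl_cmd_header.
 */
static inline void iwl_legacy_sequence_example(void)
{
	__le16 sequence = cpu_to_le16(QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(17));
	u16 seq = le16_to_cpu(sequence);

	/* SEQ_TO_QUEUE(seq) == 4, SEQ_TO_INDEX(seq) == 17 */
	(void)seq;
}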
213
214/**
215 * struct iwl3945_tx_power
216 *
217 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
218 *
219 * Each entry contains two values:
220 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
221 * linear value that multiplies the output of the digital signal processor,
222 * before being sent to the analog radio.
223 * 2) Radio gain. This sets the analog gain of the radio Tx path.
224 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
225 *
226 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
227 */
228struct iwl3945_tx_power {
229 u8 tx_gain; /* gain for analog radio */
230 u8 dsp_atten; /* gain for DSP */
231} __packed;
232
233/**
234 * struct iwl3945_power_per_rate
235 *
236 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
237 */
238struct iwl3945_power_per_rate {
239 u8 rate; /* plcp */
240 struct iwl3945_tx_power tpc;
241 u8 reserved;
242} __packed;
243
244/**
245 * iwl4965 rate_n_flags bit fields
246 *
247 * rate_n_flags format is used in following iwl4965 commands:
248 * REPLY_RX (response only)
249 * REPLY_RX_MPDU (response only)
250 * REPLY_TX (both command and response)
251 * REPLY_TX_LINK_QUALITY_CMD
252 *
253 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
254 * 2-0: 0) 6 Mbps
255 * 1) 12 Mbps
256 * 2) 18 Mbps
257 * 3) 24 Mbps
258 * 4) 36 Mbps
259 * 5) 48 Mbps
260 * 6) 54 Mbps
261 * 7) 60 Mbps
262 *
263 * 4-3: 0) Single stream (SISO)
264 * 1) Dual stream (MIMO)
265 * 2) Triple stream (MIMO)
266 *
267 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
268 *
269 * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
270 * 3-0: 0xD) 6 Mbps
271 * 0xF) 9 Mbps
272 * 0x5) 12 Mbps
273 * 0x7) 18 Mbps
274 * 0x9) 24 Mbps
275 * 0xB) 36 Mbps
276 * 0x1) 48 Mbps
277 * 0x3) 54 Mbps
278 *
279 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
280 * 6-0: 10) 1 Mbps
281 * 20) 2 Mbps
282 * 55) 5.5 Mbps
283 * 110) 11 Mbps
284 */
285#define RATE_MCS_CODE_MSK 0x7
286#define RATE_MCS_SPATIAL_POS 3
287#define RATE_MCS_SPATIAL_MSK 0x18
288#define RATE_MCS_HT_DUP_POS 5
289#define RATE_MCS_HT_DUP_MSK 0x20
290
291/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
292#define RATE_MCS_FLAGS_POS 8
293#define RATE_MCS_HT_POS 8
294#define RATE_MCS_HT_MSK 0x100
295
296/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
297#define RATE_MCS_CCK_POS 9
298#define RATE_MCS_CCK_MSK 0x200
299
300/* Bit 10: (1) Use Green Field preamble */
301#define RATE_MCS_GF_POS 10
302#define RATE_MCS_GF_MSK 0x400
303
304/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
305#define RATE_MCS_HT40_POS 11
306#define RATE_MCS_HT40_MSK 0x800
307
308/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
309#define RATE_MCS_DUP_POS 12
310#define RATE_MCS_DUP_MSK 0x1000
311
312/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
313#define RATE_MCS_SGI_POS 13
314#define RATE_MCS_SGI_MSK 0x2000
315
316/**
317 * rate_n_flags Tx antenna masks
318 * 4965 has 2 transmitters
 319 * bits 14:16
320 */
321#define RATE_MCS_ANT_POS 14
322#define RATE_MCS_ANT_A_MSK 0x04000
323#define RATE_MCS_ANT_B_MSK 0x08000
324#define RATE_MCS_ANT_C_MSK 0x10000
325#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
326#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
327#define RATE_ANT_NUM 3
328
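/*
 * Illustrative sketch (hypothetical helper and value, not part of the
 * driver): classifying a host-order rate_n_flags word with the masks
 * defined above.
 */
static inline void iwl_legacy_rate_n_flags_example(void)
{
	u32 rnf = 0x4103;	/* HT rate code 3, antenna A, 20 MHz, no SGI */
	u32 is_ht   = rnf & RATE_MCS_HT_MSK;
	u32 is_cck  = !is_ht && (rnf & RATE_MCS_CCK_MSK);
	u32 is_ht40 = rnf & RATE_MCS_HT40_MSK;
	u8 ant_msk  = (rnf >> RATE_MCS_ANT_POS) & 0x7;	/* A=1, B=2, C=4 */

	(void)is_cck; (void)is_ht40; (void)ant_msk;
}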
329#define POWER_TABLE_NUM_ENTRIES 33
330#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
331#define POWER_TABLE_CCK_ENTRY 32
332
333#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
334#define IWL_PWR_CCK_ENTRIES 2
335
336/**
337 * union iwl4965_tx_power_dual_stream
338 *
339 * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
340 * Use __le32 version (struct tx_power_dual_stream) when building command.
341 *
342 * Driver provides radio gain and DSP attenuation settings to device in pairs,
343 * one value for each transmitter chain. The first value is for transmitter A,
344 * second for transmitter B.
345 *
346 * For SISO bit rates, both values in a pair should be identical.
347 * For MIMO rates, one value may be different from the other,
348 * in order to balance the Tx output between the two transmitters.
349 *
350 * See more details in doc for TXPOWER in iwl-4965-hw.h.
351 */
352union iwl4965_tx_power_dual_stream {
353 struct {
354 u8 radio_tx_gain[2];
355 u8 dsp_predis_atten[2];
356 } s;
357 u32 dw;
358};
359
360/**
361 * struct tx_power_dual_stream
362 *
363 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
364 *
365 * Same format as iwl_tx_power_dual_stream, but __le32
366 */
367struct tx_power_dual_stream {
368 __le32 dw;
369} __packed;
370
371/**
372 * struct iwl4965_tx_power_db
373 *
374 * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
375 */
376struct iwl4965_tx_power_db {
377 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
378} __packed;
379
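/*
 * Illustrative sketch (hypothetical helper and gain values, not part of
 * the driver): filling one power-table entry as described for
 * union iwl4965_tx_power_dual_stream above.
 */
static inline void
iwl4965_tx_power_entry_example(struct tx_power_dual_stream *entry)
{
	union iwl4965_tx_power_dual_stream pw;

	pw.s.radio_tx_gain[0] = 0x3f;		/* chain A analog gain */
	pw.s.radio_tx_gain[1] = 0x3f;		/* chain B: same value for SISO rates */
	pw.s.dsp_predis_atten[0] = 0x68;	/* chain A DSP attenuation */
	pw.s.dsp_predis_atten[1] = 0x68;	/* chain B */

	entry->dw = cpu_to_le32(pw.dw);		/* device expects little-endian */
}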
380/******************************************************************************
381 * (0a)
382 * Alive and Error Commands & Responses:
383 *
384 *****************************************************************************/
385
386#define UCODE_VALID_OK cpu_to_le32(0x1)
387#define INITIALIZE_SUBTYPE (9)
388
389/*
390 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
391 *
392 * uCode issues this "initialize alive" notification once the initialization
393 * uCode image has completed its work, and is ready to load the runtime image.
394 * This is the *first* "alive" notification that the driver will receive after
395 * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
396 *
397 * See comments documenting "BSM" (bootstrap state machine).
398 *
399 * For 4965, this notification contains important calibration data for
400 * calculating txpower settings:
401 *
402 * 1) Power supply voltage indication. The voltage sensor outputs higher
 403 * values for lower voltage, and vice versa.
404 *
405 * 2) Temperature measurement parameters, for each of two channel widths
406 * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
407 * is done via one of the receiver chains, and channel width influences
408 * the results.
409 *
410 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
411 * for each of 5 frequency ranges.
412 */
413struct iwl_init_alive_resp {
414 u8 ucode_minor;
415 u8 ucode_major;
416 __le16 reserved1;
417 u8 sw_rev[8];
418 u8 ver_type;
419 u8 ver_subtype; /* "9" for initialize alive */
420 __le16 reserved2;
421 __le32 log_event_table_ptr;
422 __le32 error_event_table_ptr;
423 __le32 timestamp;
424 __le32 is_valid;
425
426 /* calibration values from "initialize" uCode */
427 __le32 voltage; /* signed, higher value is lower voltage */
428 __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */
429 __le32 therm_r2[2]; /* signed */
430 __le32 therm_r3[2]; /* signed */
431 __le32 therm_r4[2]; /* signed */
432 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
433 * 2 Tx chains */
434} __packed;
435
436
437/**
438 * REPLY_ALIVE = 0x1 (response only, not a command)
439 *
440 * uCode issues this "alive" notification once the runtime image is ready
441 * to receive commands from the driver. This is the *second* "alive"
442 * notification that the driver will receive after rebooting uCode;
443 * this "alive" is indicated by subtype field != 9.
444 *
445 * See comments documenting "BSM" (bootstrap state machine).
446 *
447 * This response includes two pointers to structures within the device's
448 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
449 *
450 * 1) log_event_table_ptr indicates base of the event log. This traces
451 * a 256-entry history of uCode execution within a circular buffer.
452 * Its header format is:
453 *
454 * __le32 log_size; log capacity (in number of entries)
455 * __le32 type; (1) timestamp with each entry, (0) no timestamp
456 * __le32 wraps; # times uCode has wrapped to top of circular buffer
457 * __le32 write_index; next circular buffer entry that uCode would fill
458 *
459 * The header is followed by the circular buffer of log entries. Entries
460 * with timestamps have the following format:
461 *
462 * __le32 event_id; range 0 - 1500
463 * __le32 timestamp; low 32 bits of TSF (of network, if associated)
464 * __le32 data; event_id-specific data value
465 *
466 * Entries without timestamps contain only event_id and data.
467 *
468 *
469 * 2) error_event_table_ptr indicates base of the error log. This contains
470 * information about any uCode error that occurs. For 4965, the format
471 * of the error log is:
472 *
473 * __le32 valid; (nonzero) valid, (0) log is empty
474 * __le32 error_id; type of error
475 * __le32 pc; program counter
476 * __le32 blink1; branch link
477 * __le32 blink2; branch link
478 * __le32 ilink1; interrupt link
479 * __le32 ilink2; interrupt link
480 * __le32 data1; error-specific data
481 * __le32 data2; error-specific data
482 * __le32 line; source code line of error
483 * __le32 bcon_time; beacon timer
484 * __le32 tsf_low; network timestamp function timer
485 * __le32 tsf_hi; network timestamp function timer
486 * __le32 gp1; GP1 timer register
487 * __le32 gp2; GP2 timer register
488 * __le32 gp3; GP3 timer register
489 * __le32 ucode_ver; uCode version
490 * __le32 hw_ver; HW Silicon version
491 * __le32 brd_ver; HW board version
492 * __le32 log_pc; log program counter
493 * __le32 frame_ptr; frame pointer
494 * __le32 stack_ptr; stack pointer
495 * __le32 hcmd; last host command
496 * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
497 * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
498 * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
499 * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
500 * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
501 * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
502 * __le32 wait_event; wait event() caller address
503 * __le32 l2p_control; L2pControlField
504 * __le32 l2p_duration; L2pDurationField
505 * __le32 l2p_mhvalid; L2pMhValidBits
506 * __le32 l2p_addr_match; L2pAddrMatchStat
 507 * __le32 lmpm_pmg_sel; indicates which clocks are turned on (LMPM_PMG_SEL)
 508 * __le32 u_timestamp; indicates the date and time of the compilation
509 * __le32 reserved;
510 *
511 * The Linux driver can print both logs to the system log when a uCode error
512 * occurs.
513 */
514struct iwl_alive_resp {
515 u8 ucode_minor;
516 u8 ucode_major;
517 __le16 reserved1;
518 u8 sw_rev[8];
519 u8 ver_type;
520 u8 ver_subtype; /* not "9" for runtime alive */
521 __le16 reserved2;
522 __le32 log_event_table_ptr; /* SRAM address for event log */
523 __le32 error_event_table_ptr; /* SRAM address for error log */
524 __le32 timestamp;
525 __le32 is_valid;
526} __packed;
527
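/*
 * Illustrative sketch (hypothetical structure name, not part of the
 * driver): the event-log header layout described in the comment above,
 * as found in SRAM at log_event_table_ptr.
 */
struct iwl_legacy_event_log_hdr_example {
	__le32 log_size;	/* capacity, in entries */
	__le32 type;		/* 1: entries carry timestamps, 0: no timestamps */
	__le32 wraps;		/* times uCode wrapped the circular buffer */
	__le32 write_index;	/* next entry uCode would fill */
} __packed;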
528/*
529 * REPLY_ERROR = 0x2 (response only, not a command)
530 */
531struct iwl_error_resp {
532 __le32 error_type;
533 u8 cmd_id;
534 u8 reserved1;
535 __le16 bad_cmd_seq_num;
536 __le32 error_info;
537 __le64 timestamp;
538} __packed;
539
540/******************************************************************************
541 * (1)
542 * RXON Commands & Responses:
543 *
544 *****************************************************************************/
545
546/*
547 * Rx config defines & structure
548 */
549/* rx_config device types */
550enum {
551 RXON_DEV_TYPE_AP = 1,
552 RXON_DEV_TYPE_ESS = 3,
553 RXON_DEV_TYPE_IBSS = 4,
554 RXON_DEV_TYPE_SNIFFER = 6,
555};
556
557
558#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
559#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
560#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
561#define RXON_RX_CHAIN_VALID_POS (1)
562#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
563#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
564#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
565#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
566#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
567#define RXON_RX_CHAIN_CNT_POS (10)
568#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
569#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
570#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
571#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
572
573/* rx_config flags */
574/* band & modulation selection */
575#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
576#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
577/* auto detection enable */
578#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
579/* TGg protection when tx */
580#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
581/* cck short slot & preamble */
582#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
583#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
584/* antenna selection */
585#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
586#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
587#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
588#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
589/* radar detection enable */
590#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
591#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
592/* rx response to host with 8-byte TSF
593* (according to ON_AIR deassertion) */
594#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
595
596
597/* HT flags */
598#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
599#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
600
601#define RXON_FLG_HT_OPERATING_MODE_POS (23)
602
603#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23)
604#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23)
605
606#define RXON_FLG_CHANNEL_MODE_POS (25)
607#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
608
609/* channel mode */
610enum {
611 CHANNEL_MODE_LEGACY = 0,
612 CHANNEL_MODE_PURE_40 = 1,
613 CHANNEL_MODE_MIXED = 2,
614 CHANNEL_MODE_RESERVED = 3,
615};
616#define RXON_FLG_CHANNEL_MODE_LEGACY \
617 cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
618#define RXON_FLG_CHANNEL_MODE_PURE_40 \
619 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
620#define RXON_FLG_CHANNEL_MODE_MIXED \
621 cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
622
623/* CTS to self (if spec allows) flag */
624#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
625
626/* rx_config filter flags */
627/* accept all data frames */
628#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
629/* pass control & management to host */
630#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
631/* accept multi-cast */
632#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
633/* don't decrypt uni-cast frames */
634#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
635/* don't decrypt multi-cast frames */
636#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
637/* STA is associated */
638#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
639/* transfer to host non bssid beacons in associated state */
640#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
641
642/**
643 * REPLY_RXON = 0x10 (command, has simple generic response)
644 *
645 * RXON tunes the radio tuner to a service channel, and sets up a number
646 * of parameters that are used primarily for Rx, but also for Tx operations.
647 *
648 * NOTE: When tuning to a new channel, driver must set the
649 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
650 * info within the device, including the station tables, tx retry
651 * rate tables, and txpower tables. Driver must build a new station
652 * table and txpower table before transmitting anything on the RXON
653 * channel.
654 *
655 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
656 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
657 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
658 */
659
660struct iwl3945_rxon_cmd {
661 u8 node_addr[6];
662 __le16 reserved1;
663 u8 bssid_addr[6];
664 __le16 reserved2;
665 u8 wlap_bssid_addr[6];
666 __le16 reserved3;
667 u8 dev_type;
668 u8 air_propagation;
669 __le16 reserved4;
670 u8 ofdm_basic_rates;
671 u8 cck_basic_rates;
672 __le16 assoc_id;
673 __le32 flags;
674 __le32 filter_flags;
675 __le16 channel;
676 __le16 reserved5;
677} __packed;
678
679struct iwl4965_rxon_cmd {
680 u8 node_addr[6];
681 __le16 reserved1;
682 u8 bssid_addr[6];
683 __le16 reserved2;
684 u8 wlap_bssid_addr[6];
685 __le16 reserved3;
686 u8 dev_type;
687 u8 air_propagation;
688 __le16 rx_chain;
689 u8 ofdm_basic_rates;
690 u8 cck_basic_rates;
691 __le16 assoc_id;
692 __le32 flags;
693 __le32 filter_flags;
694 __le16 channel;
695 u8 ofdm_ht_single_stream_basic_rates;
696 u8 ofdm_ht_dual_stream_basic_rates;
697} __packed;
698
699/* Create a common rxon cmd which will be typecast into the 3945 or 4965
700 * specific rxon cmd, depending on where it is called from.
701 */
702struct iwl_legacy_rxon_cmd {
703 u8 node_addr[6];
704 __le16 reserved1;
705 u8 bssid_addr[6];
706 __le16 reserved2;
707 u8 wlap_bssid_addr[6];
708 __le16 reserved3;
709 u8 dev_type;
710 u8 air_propagation;
711 __le16 rx_chain;
712 u8 ofdm_basic_rates;
713 u8 cck_basic_rates;
714 __le16 assoc_id;
715 __le32 flags;
716 __le32 filter_flags;
717 __le16 channel;
718 u8 ofdm_ht_single_stream_basic_rates;
719 u8 ofdm_ht_dual_stream_basic_rates;
720 u8 reserved4;
721 u8 reserved5;
722} __packed;
723
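/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * tuning rule from the REPLY_RXON comment above -- clear the association
 * filter bit before moving to a new channel, then rebuild station and
 * txpower tables before transmitting.
 */
static inline void
iwl_legacy_rxon_tune_example(struct iwl_legacy_rxon_cmd *rxon, u16 channel)
{
	rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	rxon->channel = cpu_to_le16(channel);
}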
724
725/*
726 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
727 */
728struct iwl3945_rxon_assoc_cmd {
729 __le32 flags;
730 __le32 filter_flags;
731 u8 ofdm_basic_rates;
732 u8 cck_basic_rates;
733 __le16 reserved;
734} __packed;
735
736struct iwl4965_rxon_assoc_cmd {
737 __le32 flags;
738 __le32 filter_flags;
739 u8 ofdm_basic_rates;
740 u8 cck_basic_rates;
741 u8 ofdm_ht_single_stream_basic_rates;
742 u8 ofdm_ht_dual_stream_basic_rates;
743 __le16 rx_chain_select_flags;
744 __le16 reserved;
745} __packed;
746
747#define IWL_CONN_MAX_LISTEN_INTERVAL 10
748#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
749#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
750
751/*
752 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
753 */
754struct iwl_rxon_time_cmd {
755 __le64 timestamp;
756 __le16 beacon_interval;
757 __le16 atim_window;
758 __le32 beacon_init_val;
759 __le16 listen_interval;
760 u8 dtim_period;
761 u8 delta_cp_bss_tbtts;
762} __packed;
763
764/*
765 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
766 */
767struct iwl3945_channel_switch_cmd {
768 u8 band;
769 u8 expect_beacon;
770 __le16 channel;
771 __le32 rxon_flags;
772 __le32 rxon_filter_flags;
773 __le32 switch_time;
774 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
775} __packed;
776
777struct iwl4965_channel_switch_cmd {
778 u8 band;
779 u8 expect_beacon;
780 __le16 channel;
781 __le32 rxon_flags;
782 __le32 rxon_filter_flags;
783 __le32 switch_time;
784 struct iwl4965_tx_power_db tx_power;
785} __packed;
786
787/*
788 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
789 */
790struct iwl_csa_notification {
791 __le16 band;
792 __le16 channel;
793 __le32 status; /* 0 - OK, 1 - fail */
794} __packed;
795
796/******************************************************************************
797 * (2)
798 * Quality-of-Service (QOS) Commands & Responses:
799 *
800 *****************************************************************************/
801
802/**
803 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
804 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
805 *
806 * @cw_min: Contention window, start value in numbers of slots.
807 * Should be a power-of-2, minus 1. Device's default is 0x0f.
808 * @cw_max: Contention window, max value in numbers of slots.
809 * Should be a power-of-2, minus 1. Device's default is 0x3f.
810 * @aifsn: Number of slots in Arbitration Interframe Space (before
811 * performing random backoff timing prior to Tx). Device default 1.
812 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
813 *
814 * Device will automatically increase contention window by (2*CW) + 1 for each
815 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
816 * value, to cap the CW value.
817 */
818struct iwl_ac_qos {
819 __le16 cw_min;
820 __le16 cw_max;
821 u8 aifsn;
822 u8 reserved1;
823 __le16 edca_txop;
824} __packed;
825
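/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * contention-window growth described above, starting from the device
 * defaults cw_min 0x0f and cw_max 0x3f.
 */
static inline u16 iwl_legacy_cw_next_example(u16 cw, u16 cw_max)
{
	/* 0x0f -> 0x1f -> 0x3f -> 0x3f (capped by the cw_max bit mask) */
	return ((2 * cw) + 1) & cw_max;
}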
826/* QoS flags defines */
827#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
828#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
829#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
830
831/* Number of Access Categories (AC) (EDCA), queues 0..3 */
832#define AC_NUM 4
833
834/*
835 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
836 *
837 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
838 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
839 */
840struct iwl_qosparam_cmd {
841 __le32 qos_flags;
842 struct iwl_ac_qos ac[AC_NUM];
843} __packed;
844
845/******************************************************************************
846 * (3)
847 * Add/Modify Stations Commands & Responses:
848 *
849 *****************************************************************************/
850/*
851 * Multi station support
852 */
853
854/* Special, dedicated locations within device's station table */
855#define IWL_AP_ID 0
856#define IWL_STA_ID 2
857#define IWL3945_BROADCAST_ID 24
858#define IWL3945_STATION_COUNT 25
859#define IWL4965_BROADCAST_ID 31
860#define IWL4965_STATION_COUNT 32
861
862#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
863#define IWL_INVALID_STATION 255
864
865#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
866#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
867#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
868#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
869#define STA_FLG_MAX_AGG_SIZE_POS (19)
870#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19)
871#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21)
872#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22)
873#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
874#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23)
875
876/* Use in mode field. 1: modify existing entry, 0: add new station entry */
877#define STA_CONTROL_MODIFY_MSK 0x01
878
879/* key flags __le16*/
880#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
881#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
882#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
883#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
884#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
885
886#define STA_KEY_FLG_KEYID_POS 8
887#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
888/* wep key is either from global key (0) or from station info array (1) */
889#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008)
890
891/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
892#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
893#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
894#define STA_KEY_MAX_NUM 8
895
896/* Flags indicate whether to modify vs. don't change various station params */
897#define STA_MODIFY_KEY_MASK 0x01
898#define STA_MODIFY_TID_DISABLE_TX 0x02
899#define STA_MODIFY_TX_RATE_MSK 0x04
900#define STA_MODIFY_ADDBA_TID_MSK 0x08
901#define STA_MODIFY_DELBA_TID_MSK 0x10
902#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
903
904/* Receiver address (actually, Rx station's index into station table),
905 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
906#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
907
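/*
 * Illustrative expansion (hypothetical values): BUILD_RAxTID(5, 3) above
 * yields 0x53 -- station table index 5 in the high nibble, TID 3 in the
 * low nibble, the format used by the Tx scheduler.
 */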
908struct iwl4965_keyinfo {
909 __le16 key_flags;
910 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
911 u8 reserved1;
912 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
913 u8 key_offset;
914 u8 reserved2;
915 u8 key[16]; /* 16-byte unicast decryption key */
916} __packed;
917
918/**
919 * struct sta_id_modify
920 * @addr[ETH_ALEN]: station's MAC address
921 * @sta_id: index of station in uCode's station table
922 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
923 *
924 * Driver selects unused table index when adding new station,
925 * or the index to a pre-existing station entry when modifying that station.
926 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
927 *
928 * modify_mask flags select which parameters to modify vs. leave alone.
929 */
930struct sta_id_modify {
931 u8 addr[ETH_ALEN];
932 __le16 reserved1;
933 u8 sta_id;
934 u8 modify_mask;
935 __le16 reserved2;
936} __packed;
937
938/*
939 * REPLY_ADD_STA = 0x18 (command)
940 *
941 * The device contains an internal table of per-station information,
942 * with info on security keys, aggregation parameters, and Tx rates for
943 * initial Tx attempt and any retries (4965 devices uses
944 * REPLY_TX_LINK_QUALITY_CMD,
945 * 3945 uses REPLY_RATE_SCALE to set up rate tables).
946 *
947 * REPLY_ADD_STA sets up the table entry for one station, either creating
948 * a new entry, or modifying a pre-existing one.
949 *
950 * NOTE: RXON command (without "associated" bit set) wipes the station table
951 * clean. Moving into RF_KILL state does this also. Driver must set up
952 * new station table before transmitting anything on the RXON channel
953 * (except active scans or active measurements; those commands carry
954 * their own txpower/rate setup data).
955 *
956 * When getting started on a new channel, driver must set up the
957 * IWL_BROADCAST_ID entry (last entry in the table). For a client
958 * station in a BSS, once an AP is selected, driver sets up the AP STA
959 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
960 * are all that are needed for a BSS client station. If the device is
961 * used as AP, or in an IBSS network, driver must set up station table
962 * entries for all STAs in network, starting with index IWL_STA_ID.
963 */
964
965struct iwl3945_addsta_cmd {
966 u8 mode; /* 1: modify existing, 0: add new station */
967 u8 reserved[3];
968 struct sta_id_modify sta;
969 struct iwl4965_keyinfo key;
970 __le32 station_flags; /* STA_FLG_* */
971 __le32 station_flags_msk; /* STA_FLG_* */
972
973 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
974 * corresponding to bit (e.g. bit 5 controls TID 5).
975 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
976 __le16 tid_disable_tx;
977
978 __le16 rate_n_flags;
979
980 /* TID for which to add block-ack support.
981 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
982 u8 add_immediate_ba_tid;
983
984 /* TID for which to remove block-ack support.
985 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
986 u8 remove_immediate_ba_tid;
987
988 /* Starting Sequence Number for added block-ack support.
989 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
990 __le16 add_immediate_ba_ssn;
991} __packed;
992
993struct iwl4965_addsta_cmd {
994 u8 mode; /* 1: modify existing, 0: add new station */
995 u8 reserved[3];
996 struct sta_id_modify sta;
997 struct iwl4965_keyinfo key;
998 __le32 station_flags; /* STA_FLG_* */
999 __le32 station_flags_msk; /* STA_FLG_* */
1000
1001 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
1002 * corresponding to bit (e.g. bit 5 controls TID 5).
1003 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1004 __le16 tid_disable_tx;
1005
1006 __le16 reserved1;
1007
1008 /* TID for which to add block-ack support.
1009 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1010 u8 add_immediate_ba_tid;
1011
1012 /* TID for which to remove block-ack support.
1013 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1014 u8 remove_immediate_ba_tid;
1015
1016 /* Starting Sequence Number for added block-ack support.
1017 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1018 __le16 add_immediate_ba_ssn;
1019
1020 /*
1021 * Number of packets OK to transmit to station even though
1022 * it is asleep -- used to synchronise PS-poll and u-APSD
1023 * responses while ucode keeps track of STA sleep state.
1024 */
1025 __le16 sleep_tx_count;
1026
1027 __le16 reserved2;
1028} __packed;
1029
1030/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
1031struct iwl_legacy_addsta_cmd {
1032 u8 mode; /* 1: modify existing, 0: add new station */
1033 u8 reserved[3];
1034 struct sta_id_modify sta;
1035 struct iwl4965_keyinfo key;
1036 __le32 station_flags; /* STA_FLG_* */
1037 __le32 station_flags_msk; /* STA_FLG_* */
1038
1039 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
1040 * corresponding to bit (e.g. bit 5 controls TID 5).
1041 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1042 __le16 tid_disable_tx;
1043
1044 __le16 rate_n_flags; /* 3945 only */
1045
1046 /* TID for which to add block-ack support.
1047 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1048 u8 add_immediate_ba_tid;
1049
1050 /* TID for which to remove block-ack support.
1051 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1052 u8 remove_immediate_ba_tid;
1053
1054 /* Starting Sequence Number for added block-ack support.
1055 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1056 __le16 add_immediate_ba_ssn;
1057
1058 /*
1059 * Number of packets OK to transmit to station even though
1060 * it is asleep -- used to synchronise PS-poll and u-APSD
1061 * responses while ucode keeps track of STA sleep state.
1062 */
1063 __le16 sleep_tx_count;
1064
1065 __le16 reserved2;
1066} __packed;
1067
1068
1069#define ADD_STA_SUCCESS_MSK 0x1
1070#define ADD_STA_NO_ROOM_IN_TABLE 0x2
1071#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
1072#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
1073/*
1074 * REPLY_ADD_STA = 0x18 (response)
1075 */
1076struct iwl_add_sta_resp {
1077 u8 status; /* ADD_STA_* */
1078} __packed;
1079
1080#define REM_STA_SUCCESS_MSK 0x1
1081/*
1082 * REPLY_REM_STA = 0x19 (response)
1083 */
1084struct iwl_rem_sta_resp {
1085 u8 status;
1086} __packed;
1087
1088/*
1089 * REPLY_REM_STA = 0x19 (command)
1090 */
1091struct iwl_rem_sta_cmd {
1092 u8 num_sta; /* number of removed stations */
1093 u8 reserved[3];
1094 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
1095 u8 reserved2[2];
1096} __packed;
1097
1098#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
1099#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
1100#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
1101#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
1102#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1103
1104#define IWL_DROP_SINGLE 0
1105#define IWL_DROP_SELECTED 1
1106#define IWL_DROP_ALL 2
1107
1108/*
1109 * REPLY_WEP_KEY = 0x20
1110 */
1111struct iwl_wep_key {
1112 u8 key_index;
1113 u8 key_offset;
1114 u8 reserved1[2];
1115 u8 key_size;
1116 u8 reserved2[3];
1117 u8 key[16];
1118} __packed;
1119
1120struct iwl_wep_cmd {
1121 u8 num_keys;
1122 u8 global_key_type;
1123 u8 flags;
1124 u8 reserved;
1125 struct iwl_wep_key key[0];
1126} __packed;
1127
1128#define WEP_KEY_WEP_TYPE 1
1129#define WEP_KEYS_MAX 4
1130#define WEP_INVALID_OFFSET 0xff
1131#define WEP_KEY_LEN_64 5
1132#define WEP_KEY_LEN_128 13
1133
1134/******************************************************************************
1135 * (4)
1136 * Rx Responses:
1137 *
1138 *****************************************************************************/
1139
1140#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
1141#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
1142
1143#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
1144#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
1145#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
1146#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
1147#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
1148#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
1149
1150#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
1151#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
1152#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
1153#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
1154#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
1155#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8)
1156
1157#define RX_RES_STATUS_STATION_FOUND (1<<6)
1158#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
1159
1160#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
1161#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
1162#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
1163#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
1164#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
1165
1166#define RX_MPDU_RES_STATUS_ICV_OK (0x20)
1167#define RX_MPDU_RES_STATUS_MIC_OK (0x40)
1168#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1169#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
1170
1171
1172struct iwl3945_rx_frame_stats {
1173 u8 phy_count;
1174 u8 id;
1175 u8 rssi;
1176 u8 agc;
1177 __le16 sig_avg;
1178 __le16 noise_diff;
1179 u8 payload[0];
1180} __packed;
1181
1182struct iwl3945_rx_frame_hdr {
1183 __le16 channel;
1184 __le16 phy_flags;
1185 u8 reserved1;
1186 u8 rate;
1187 __le16 len;
1188 u8 payload[0];
1189} __packed;
1190
1191struct iwl3945_rx_frame_end {
1192 __le32 status;
1193 __le64 timestamp;
1194 __le32 beacon_timestamp;
1195} __packed;
1196
1197/*
1198 * REPLY_3945_RX = 0x1b (response only, not a command)
1199 *
1200 * NOTE: DO NOT dereference from casts to this structure
1201 * It is provided only for calculating minimum data set size.
1202 * The actual offsets of the hdr and end are dynamic based on
1203 * stats.phy_count
1204 */
1205struct iwl3945_rx_frame {
1206 struct iwl3945_rx_frame_stats stats;
1207 struct iwl3945_rx_frame_hdr hdr;
1208 struct iwl3945_rx_frame_end end;
1209} __packed;
1210
1211#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
1212
1213/* Fixed (non-configurable) rx data from phy */
1214
1215#define IWL49_RX_RES_PHY_CNT 14
1216#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1217#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1218#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1219#define IWL49_AGC_DB_POS (7)
1220struct iwl4965_rx_non_cfg_phy {
1221 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1222 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1223 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1224 u8 pad[0];
1225} __packed;
1226
1227
1228/*
1229 * REPLY_RX = 0xc3 (response only, not a command)
1230 * Used only for legacy (non 11n) frames.
1231 */
1232struct iwl_rx_phy_res {
1233 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1234 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1235 u8 stat_id; /* configurable DSP phy data set ID */
1236 u8 reserved1;
1237 __le64 timestamp; /* TSF at on air rise */
1238 __le32 beacon_time_stamp; /* beacon at on-air rise */
1239 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1240 __le16 channel; /* channel number */
1241 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1242 __le32 rate_n_flags; /* RATE_MCS_* */
1243 __le16 byte_count; /* frame's byte-count */
1244 __le16 frame_time; /* frame's time on the air */
1245} __packed;
1246
1247struct iwl_rx_mpdu_res_start {
1248 __le16 byte_count;
1249 __le16 reserved;
1250} __packed;
1251
1252
1253/******************************************************************************
1254 * (5)
1255 * Tx Commands & Responses:
1256 *
1257 * Driver must place each REPLY_TX command into one of the prioritized Tx
1258 * queues in host DRAM, shared between driver and device (see comments for
1259 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1260 * are preparing to transmit, the device pulls the Tx command over the PCI
1261 * bus via one of the device's Tx DMA channels, to fill an internal FIFO
1262 * from which data will be transmitted.
1263 *
1264 * uCode handles all timing and protocol related to control frames
1265 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1266 * handle reception of block-acks; uCode updates the host driver via
1267 * REPLY_COMPRESSED_BA.
1268 *
1269 * uCode handles retrying Tx when an ACK is expected but not received.
1270 * This includes trying lower data rates than the one requested in the Tx
1271 * command, as set up by the REPLY_RATE_SCALE (for 3945) or
1272 * REPLY_TX_LINK_QUALITY_CMD (4965).
1273 *
1274 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1275 * This command must be executed after every RXON command, before Tx can occur.
1276 *****************************************************************************/
1277
1278/* REPLY_TX Tx flags field */
1279
1280/*
1281 * 1: Use Request-To-Send protocol before this frame.
1282 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
1283 */
1284#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
1285
1286/*
1287 * 1: Transmit Clear-To-Send to self before this frame.
1288 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
1289 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
1290 */
1291#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
1292
1293/* 1: Expect ACK from receiving station
1294 * 0: Don't expect ACK (MAC header's duration field s/b 0)
1295 * Set this for unicast frames, but not broadcast/multicast. */
1296#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1297
1298/* For 4965 devices:
1299 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
1300 * Tx command's initial_rate_index indicates first rate to try;
1301 * uCode walks through table for additional Tx attempts.
1302 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
1303 * This rate will be used for all Tx attempts; it will not be scaled. */
1304#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
1305
1306/* 1: Expect immediate block-ack.
1307 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
1308#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
1309
1310/*
1311 * 1: Frame requires full Tx-Op protection.
1312 * Set this if either RTS or CTS Tx Flag gets set.
1313 */
1314#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
1315
1316/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
1317 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
1318#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
1319#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
1320#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
1321
1322/* 1: uCode overrides sequence control field in MAC header.
1323 * 0: Driver provides sequence control field in MAC header.
1324 * Set this for management frames, non-QOS data frames, non-unicast frames,
1325 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
1326#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
1327
1328/* 1: This frame is non-last MPDU; more fragments are coming.
1329 * 0: Last fragment, or not using fragmentation. */
1330#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
1331
1332/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
1333 * 0: No TSF required in outgoing frame.
1334 * Set this for transmitting beacons and probe responses. */
1335#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
1336
1337/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
1338 * alignment of frame's payload data field.
1339 * 0: No pad
1340 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
1341 * field (but not both). Driver must align frame data (i.e. data following
1342 * MAC header) to DWORD boundary. */
1343#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
1344
1345/* accelerate aggregation support
1346 * 0 - no CCMP encryption; 1 - CCMP encryption */
1347#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
1348
1349/* HCCA-AP - disable duration overwriting. */
1350#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
1351
1352
1353/*
1354 * TX command security control
1355 */
1356#define TX_CMD_SEC_WEP 0x01
1357#define TX_CMD_SEC_CCM 0x02
1358#define TX_CMD_SEC_TKIP 0x03
1359#define TX_CMD_SEC_MSK 0x03
1360#define TX_CMD_SEC_SHIFT 6
1361#define TX_CMD_SEC_KEY128 0x08
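
/*
 * Illustrative sketch (not part of the original header): one plausible way a
 * driver could compose the sec_ctl byte of a Tx command from the values
 * above -- cipher type in the low bits, WEP key index shifted by
 * TX_CMD_SEC_SHIFT, TX_CMD_SEC_KEY128 for 104-bit WEP keys.  The helper name
 * and parameters are hypothetical; key material itself would go into the
 * key[] array of the Tx command.
 */
static inline u8 example_build_sec_ctl(u8 cipher, u8 wep_key_idx, bool wep104)
{
	u8 sec_ctl = cipher & TX_CMD_SEC_MSK;	/* TX_CMD_SEC_WEP/CCM/TKIP */

	if (cipher == TX_CMD_SEC_WEP) {
		sec_ctl |= (wep_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
		if (wep104)
			sec_ctl |= TX_CMD_SEC_KEY128;
	}
	return sec_ctl;
}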
1362
1363/*
1364 * security overhead sizes
1365 */
1366#define WEP_IV_LEN 4
1367#define WEP_ICV_LEN 4
1368#define CCMP_MIC_LEN 8
1369#define TKIP_ICV_LEN 4
1370
1371/*
1372 * REPLY_TX = 0x1c (command)
1373 */
1374
1375struct iwl3945_tx_cmd {
1376 /*
1377 * MPDU byte count:
1378 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1379 * + 8 byte IV for CCM or TKIP (not used for WEP)
1380 * + Data payload
1381 * + 8-byte MIC for TKIP (not used for CCM/WEP)
1382 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1383 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1384 * Range: 14-2342 bytes.
1385 */
1386 __le16 len;
1387
1388 /*
1389 * MPDU or MSDU byte count for next frame.
1390 * Used for fragmentation and bursting, but not 11n aggregation.
1391 * Same as "len", but for next frame. Set to 0 if not applicable.
1392 */
1393 __le16 next_frame_len;
1394
1395 __le32 tx_flags; /* TX_CMD_FLG_* */
1396
1397 u8 rate;
1398
1399 /* Index of recipient station in uCode's station table */
1400 u8 sta_id;
1401 u8 tid_tspec;
1402 u8 sec_ctl;
1403 u8 key[16];
1404 union {
1405 u8 byte[8];
1406 __le16 word[4];
1407 __le32 dw[2];
1408 } tkip_mic;
1409 __le32 next_frame_info;
1410 union {
1411 __le32 life_time;
1412 __le32 attempt;
1413 } stop_time;
1414 u8 supp_rates[2];
1415 u8 rts_retry_limit; /*byte 50 */
1416 u8 data_retry_limit; /*byte 51 */
1417 union {
1418 __le16 pm_frame_timeout;
1419 __le16 attempt_duration;
1420 } timeout;
1421
1422 /*
1423 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1424 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1425 */
1426 __le16 driver_txop;
1427
1428 /*
1429 * MAC header goes here, followed by 2 bytes padding if MAC header
1430 * length is 26 or 30 bytes, followed by payload data
1431 */
1432 u8 payload[0];
1433 struct ieee80211_hdr hdr[0];
1434} __packed;
1435
1436/*
1437 * REPLY_TX = 0x1c (response)
1438 */
1439struct iwl3945_tx_resp {
1440 u8 failure_rts;
1441 u8 failure_frame;
1442 u8 bt_kill_count;
1443 u8 rate;
1444 __le32 wireless_media_time;
1445 __le32 status; /* TX status */
1446} __packed;
1447
1448
1449/*
1450 * 4965 uCode updates these Tx attempt count values in host DRAM.
1451 * Used for managing Tx retries when expecting block-acks.
1452 * Driver should set these fields to 0.
1453 */
1454struct iwl_dram_scratch {
1455 u8 try_cnt; /* Tx attempts */
1456 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1457 __le16 reserved;
1458} __packed;
1459
1460struct iwl_tx_cmd {
1461 /*
1462 * MPDU byte count:
1463 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1464 * + 8 byte IV for CCM or TKIP (not used for WEP)
1465 * + Data payload
1466 * + 8-byte MIC for TKIP (not used for CCM/WEP)
1467 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1468 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1469 * Range: 14-2342 bytes.
1470 */
1471 __le16 len;
1472
1473 /*
1474 * MPDU or MSDU byte count for next frame.
1475 * Used for fragmentation and bursting, but not 11n aggregation.
1476 * Same as "len", but for next frame. Set to 0 if not applicable.
1477 */
1478 __le16 next_frame_len;
1479
1480 __le32 tx_flags; /* TX_CMD_FLG_* */
1481
1482 /* uCode may modify this field of the Tx command (in host DRAM!).
1483 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
1484 struct iwl_dram_scratch scratch;
1485
1486 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
1487 __le32 rate_n_flags; /* RATE_MCS_* */
1488
1489 /* Index of destination station in uCode's station table */
1490 u8 sta_id;
1491
1492 /* Type of security encryption: CCM or TKIP */
1493 u8 sec_ctl; /* TX_CMD_SEC_* */
1494
1495 /*
1496 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
1497 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
1498 * data frames, this field may be used to selectively reduce initial
1499 * rate (via non-0 value) for special frames (e.g. management), while
1500 * still supporting rate scaling for all frames.
1501 */
1502 u8 initial_rate_index;
1503 u8 reserved;
1504 u8 key[16];
1505 __le16 next_frame_flags;
1506 __le16 reserved2;
1507 union {
1508 __le32 life_time;
1509 __le32 attempt;
1510 } stop_time;
1511
1512 /* Host DRAM physical address pointer to "scratch" in this command.
1513 * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
1514 __le32 dram_lsb_ptr;
1515 u8 dram_msb_ptr;
1516
1517 u8 rts_retry_limit; /*byte 50 */
1518 u8 data_retry_limit; /*byte 51 */
1519 u8 tid_tspec;
1520 union {
1521 __le16 pm_frame_timeout;
1522 __le16 attempt_duration;
1523 } timeout;
1524
1525 /*
1526 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1527 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1528 */
1529 __le16 driver_txop;
1530
1531 /*
1532 * MAC header goes here, followed by 2 bytes padding if MAC header
1533 * length is 26 or 30 bytes, followed by payload data
1534 */
1535 u8 payload[0];
1536 struct ieee80211_hdr hdr[0];
1537} __packed;
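
/*
 * Illustrative sketch (not part of the original header): filling
 * dram_lsb_ptr/dram_msb_ptr so that uCode can write the iwl_dram_scratch
 * block back to host DRAM, as the comments in struct iwl_tx_cmd require.
 * 'tx_cmd_phys' is assumed to be the DMA address of the Tx command itself;
 * the helper name is hypothetical.
 */
static inline void example_set_scratch_ptr(struct iwl_tx_cmd *tx_cmd,
					   dma_addr_t tx_cmd_phys)
{
	u64 scratch_phys = (u64)tx_cmd_phys +
			   offsetof(struct iwl_tx_cmd, scratch);

	/* Low 32 bits; must be dword aligned, 0 disables the feature. */
	tx_cmd->dram_lsb_ptr = cpu_to_le32((u32)scratch_phys);
	/* Only one byte is available for the upper part of the address. */
	tx_cmd->dram_msb_ptr = (u8)(scratch_phys >> 32);
}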
1538
1539/* TX command response is sent after *3945* transmission attempts.
1540 *
1541 * NOTES:
1542 *
1543 * TX_STATUS_FAIL_NEXT_FRAG
1544 *
1545 * If the fragment flag in the MAC header for the frame being transmitted
1546 * is set and there is insufficient time to transmit the next frame, the
1547 * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
1548 *
1549 * TX_STATUS_FIFO_UNDERRUN
1550 *
1551 * Indicates the host did not provide bytes to the FIFO fast enough while
1552 * a TX was in progress.
1553 *
1554 * TX_STATUS_FAIL_MGMNT_ABORT
1555 *
1556 * This status is only possible if the ABORT ON MGMT RX parameter was
1557 * set to true with the TX command.
1558 *
1559 * If the MSB of the status parameter is set then an abort sequence is
1560 * required. This sequence consists of the host activating the TX Abort
1561 * control line, and then waiting for the TX Abort command response. This
1562 * indicates that the device is no longer in a transmit state, and that the
1563 * command FIFO has been cleared. The host must then deactivate the TX Abort
1564 * control line. Receiving is still allowed in this case.
1565 */
1566enum {
1567 TX_3945_STATUS_SUCCESS = 0x01,
1568 TX_3945_STATUS_DIRECT_DONE = 0x02,
1569 TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
1570 TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
1571 TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1572 TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
1573 TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
1574 TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1575 TX_3945_STATUS_FAIL_DEST_PS = 0x88,
1576 TX_3945_STATUS_FAIL_ABORTED = 0x89,
1577 TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
1578 TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
1579 TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1580 TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
1581 TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
1582 TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1583 TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
1584 TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1585};
1586
1587/*
1588 * TX command response is sent after *4965* transmission attempts.
1589 *
1590 * Both postpone and abort statuses are expected behavior from uCode; no
1591 * special operation is required from the driver, except for RFKILL_FLUSH,
1592 * which requires the tx flush host command to flush all tx frames in the queues.
1593 */
1594enum {
1595 TX_STATUS_SUCCESS = 0x01,
1596 TX_STATUS_DIRECT_DONE = 0x02,
1597 /* postpone TX */
1598 TX_STATUS_POSTPONE_DELAY = 0x40,
1599 TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
1600 TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
1601 TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
1602 /* abort TX */
1603 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
1604 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
1605 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
1606 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1607 TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
1608 TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
1609 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1610 TX_STATUS_FAIL_DEST_PS = 0x88,
1611 TX_STATUS_FAIL_HOST_ABORTED = 0x89,
1612 TX_STATUS_FAIL_BT_RETRY = 0x8a,
1613 TX_STATUS_FAIL_STA_INVALID = 0x8b,
1614 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1615 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1616 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1617 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1618 TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
1619 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1620};
1621
1622#define TX_PACKET_MODE_REGULAR 0x0000
1623#define TX_PACKET_MODE_BURST_SEQ 0x0100
1624#define TX_PACKET_MODE_BURST_FIRST 0x0200
1625
1626enum {
1627 TX_POWER_PA_NOT_ACTIVE = 0x0,
1628};
1629
1630enum {
1631 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
1632 TX_STATUS_DELAY_MSK = 0x00000040,
1633 TX_STATUS_ABORT_MSK = 0x00000080,
1634 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
1635 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
1636 TX_RESERVED = 0x00780000, /* bits 19:22 */
1637 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
1638 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1639};
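
/*
 * Illustrative sketch (not part of the original header): checking the
 * non-aggregation status word of a REPLY_TX response with the masks above
 * (status code in bits 0:7).  The helper name is hypothetical.
 */
static inline bool example_tx_frame_successful(__le32 status)
{
	u32 st = le32_to_cpu(status) & TX_STATUS_MSK;

	return st == TX_STATUS_SUCCESS || st == TX_STATUS_DIRECT_DONE;
}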
1640
1641/* *******************************
1642 * TX aggregation status
1643 ******************************* */
1644
1645enum {
1646 AGG_TX_STATE_TRANSMITTED = 0x00,
1647 AGG_TX_STATE_UNDERRUN_MSK = 0x01,
1648 AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
1649 AGG_TX_STATE_ABORT_MSK = 0x08,
1650 AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
1651 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
1652 AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
1653 AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
1654 AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
1655 AGG_TX_STATE_DUMP_TX_MSK = 0x200,
1656 AGG_TX_STATE_DELAY_TX_MSK = 0x400
1657};
1658
1659#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
1660#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
1661
1662#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
1663 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)
1664
1665/* # tx attempts for first frame in aggregation */
1666#define AGG_TX_STATE_TRY_CNT_POS 12
1667#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
1668
1669/* Command ID and sequence number of Tx command for this frame */
1670#define AGG_TX_STATE_SEQ_NUM_POS 16
1671#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
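
/*
 * Illustrative sketch (not part of the original header): splitting one
 * 32-bit per-frame aggregation status word into the fields laid out by the
 * masks above.  Structure, field, and helper names are hypothetical.
 */
struct example_agg_frame_status {
	u16 state;	/* AGG_TX_STATE_* bits, 0:11 */
	u8 try_cnt;	/* Tx attempts for 1st frame, bits 12:15 */
	u16 seq_num;	/* Tx command sequence number, bits 16:31 */
};

static inline struct example_agg_frame_status
example_parse_agg_status(u32 frame_status)
{
	struct example_agg_frame_status s = {
		.state = frame_status & AGG_TX_STATUS_MSK,
		.try_cnt = (frame_status & AGG_TX_STATE_TRY_CNT_MSK) >>
			   AGG_TX_STATE_TRY_CNT_POS,
		.seq_num = (frame_status & AGG_TX_STATE_SEQ_NUM_MSK) >>
			   AGG_TX_STATE_SEQ_NUM_POS,
	};

	return s;
}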
1672
1673/*
1674 * REPLY_TX = 0x1c (response)
1675 *
1676 * This response may be in one of two slightly different formats, indicated
1677 * by the frame_count field:
1678 *
1679 * 1) No aggregation (frame_count == 1). This reports Tx results for
1680 * a single frame. Multiple attempts, at various bit rates, may have
1681 * been made for this frame.
1682 *
1683 * 2) Aggregation (frame_count > 1). This reports Tx results for
1684 * 2 or more frames that used block-acknowledge. All frames were
1685 * transmitted at same rate. Rate scaling may have been used if first
1686 * frame in this new agg block failed in previous agg block(s).
1687 *
1688 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
1689 * block-ack has not been received by the time the 4965 device records
1690 * this status.
1691 * This status relates to reasons the tx might have been blocked or aborted
1692 * within the sending station (this 4965 device), rather than whether it was
1693 * received successfully by the destination station.
1694 */
1695struct agg_tx_status {
1696 __le16 status;
1697 __le16 sequence;
1698} __packed;
1699
1700struct iwl4965_tx_resp {
1701 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1702 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1703 u8 failure_rts; /* # failures due to unsuccessful RTS */
1704 u8 failure_frame; /* # failures due to no ACK (unused for agg) */
1705
1706 /* For non-agg: Rate at which frame was successful.
1707 * For agg: Rate at which all frames were transmitted. */
1708 __le32 rate_n_flags; /* RATE_MCS_* */
1709
1710 /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
1711 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1712 __le16 wireless_media_time; /* uSecs */
1713
1714 __le16 reserved;
1715 __le32 pa_power1; /* RF power amplifier measurement (not used) */
1716 __le32 pa_power2;
1717
1718 /*
1719 * For non-agg: frame status TX_STATUS_*
1720 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
1721 * fields follow this one, up to frame_count.
1722 * Bit fields:
1723 * 11- 0: AGG_TX_STATE_* status code
1724 * 15-12: Retry count for 1st frame in aggregation (retries
1725 * occur if tx failed for this frame when it was a
1726 * member of a previous aggregation block). If rate
1727 * scaling is used, retry count indicates the rate
1728 * table entry used for all frames in the new agg.
1729 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1730 */
1731 union {
1732 __le32 status;
1733 struct agg_tx_status agg_status[0]; /* for each agg frame */
1734 } u;
1735} __packed;
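
/*
 * Illustrative sketch (not part of the original header): reading the first
 * frame's status in either of the two response formats described above.
 * For frame_count == 1 the union holds one TX_STATUS_* word; for
 * frame_count > 1 it holds one agg_tx_status entry per frame.  The helper
 * name is hypothetical.
 */
static inline u16 example_first_frame_status(struct iwl4965_tx_resp *resp)
{
	if (resp->frame_count == 1)
		return le32_to_cpu(resp->u.status) & TX_STATUS_MSK;

	/* Aggregation: AGG_TX_STATE_* bits of the first entry. */
	return le16_to_cpu(resp->u.agg_status[0].status) & AGG_TX_STATUS_MSK;
}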
1736
1737/*
1738 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1739 *
1740 * Reports Block-Acknowledge from recipient station
1741 */
1742struct iwl_compressed_ba_resp {
1743 __le32 sta_addr_lo32;
1744 __le16 sta_addr_hi16;
1745 __le16 reserved;
1746
1747 /* Index of recipient (BA-sending) station in uCode's station table */
1748 u8 sta_id;
1749 u8 tid;
1750 __le16 seq_ctl;
1751 __le64 bitmap;
1752 __le16 scd_flow;
1753 __le16 scd_ssn;
1754} __packed;
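
/*
 * Illustrative sketch (not part of the original header): a minimal reading
 * of a compressed block-ack.  Each set bit in 'bitmap' acknowledges one MPDU
 * of the aggregate starting from the sequence number in seq_ctl; scd_ssn
 * tells the driver where the scheduler window now starts.  The helper name
 * is hypothetical, and real reclaim code must also align the bitmap with
 * the driver's own Tx window before using it.
 */
static inline unsigned int
example_ba_acked_count(struct iwl_compressed_ba_resp *ba)
{
	return hweight64(le64_to_cpu(ba->bitmap));
}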
1755
1756/*
1757 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
1758 *
1759 * See details under "TXPOWER" in iwl-4965-hw.h.
1760 */
1761
1762struct iwl3945_txpowertable_cmd {
1763 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1764 u8 reserved;
1765 __le16 channel;
1766 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
1767} __packed;
1768
1769struct iwl4965_txpowertable_cmd {
1770 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1771 u8 reserved;
1772 __le16 channel;
1773 struct iwl4965_tx_power_db tx_power;
1774} __packed;
1775
1776
1777/**
1778 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
1779 *
1780 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
1781 *
1782 * NOTE: The table of rates passed to the uCode via the
1783 * RATE_SCALE command sets up the corresponding order of
1784 * rates used for all related commands, including rate
1785 * masks, etc.
1786 *
1787 * For example, if you set 9MB (PLCP 0x0f) as the first
1788 * rate in the rate table, the bit mask for that rate
1789 * when passed through ofdm_basic_rates on the REPLY_RXON
1790 * command would be bit 0 (1 << 0)
1791 */
1792struct iwl3945_rate_scaling_info {
1793 __le16 rate_n_flags;
1794 u8 try_cnt;
1795 u8 next_rate_index;
1796} __packed;
1797
1798struct iwl3945_rate_scaling_cmd {
1799 u8 table_id;
1800 u8 reserved[3];
1801 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
1802} __packed;
1803
1804
1805/* RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
1806#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1807
1808/* # of EDCA prioritized tx fifos */
1809#define LINK_QUAL_AC_NUM AC_NUM
1810
1811/* # entries in rate scale table to support Tx retries */
1812#define LINK_QUAL_MAX_RETRY_NUM 16
1813
1814/* Tx antenna selection values */
1815#define LINK_QUAL_ANT_A_MSK (1 << 0)
1816#define LINK_QUAL_ANT_B_MSK (1 << 1)
1817#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1818
1819
1820/**
1821 * struct iwl_link_qual_general_params
1822 *
1823 * Used in REPLY_TX_LINK_QUALITY_CMD
1824 */
1825struct iwl_link_qual_general_params {
1826 u8 flags;
1827
1828 /* No entries at or above this (driver chosen) index contain MIMO */
1829 u8 mimo_delimiter;
1830
1831 /* Best single antenna to use for single stream (legacy, SISO). */
1832 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
1833
1834 /* Best antennas to use for MIMO (unused for 4965, assumes both). */
1835 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
1836
1837 /*
1838 * If driver needs to use different initial rates for different
1839 * EDCA QOS access categories (as implemented by tx fifos 0-3),
1840 * this table will set that up, by indicating the indexes in the
1841 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
1842 * Otherwise, driver should set all entries to 0.
1843 *
1844 * Entry usage:
1845 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
1846 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
1847 */
1848 u8 start_rate_index[LINK_QUAL_AC_NUM];
1849} __packed;
1850
1851#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
1852#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
1853#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
1854
1855#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
1856#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
1857#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
1858
1859#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31)
1860#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
1861#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
1862
1863/**
1864 * struct iwl_link_qual_agg_params
1865 *
1866 * Used in REPLY_TX_LINK_QUALITY_CMD
1867 */
1868struct iwl_link_qual_agg_params {
1869
1870 /*
1871 * Maximum number of uSec in aggregation.
1872 * Default is 4000 (4 milliseconds) if not configured in .cfg.
1873 */
1874 __le16 agg_time_limit;
1875
1876 /*
1877 * Number of Tx retries allowed for a frame, before that frame will
1878 * no longer be considered for the start of an aggregation sequence
1879 * (scheduler will then try to tx it as single frame).
1880 * Driver should set this to 3.
1881 */
1882 u8 agg_dis_start_th;
1883
1884 /*
1885 * Maximum number of frames in aggregation.
1886 * 0 = no limit (default). 1 = no aggregation.
1887 * Other values = max # frames in aggregation.
1888 */
1889 u8 agg_frame_cnt_limit;
1890
1891 __le32 reserved;
1892} __packed;
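
/*
 * Illustrative sketch (not part of the original header): filling the
 * aggregation parameters with the default values defined above (the
 * per-field comments also allow 0 in agg_frame_cnt_limit for "no limit").
 * The helper name is hypothetical.
 */
static inline void
example_init_lq_agg_params(struct iwl_link_qual_agg_params *agg)
{
	agg->agg_time_limit = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
	agg->agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	agg->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	agg->reserved = cpu_to_le32(0);
}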
1893
1894/*
1895 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1896 *
1897 * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
1898 *
1899 * Each station in the 4965 device's internal station table has its own table
1900 * of 16
1901 * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when
1902 * an ACK is not received. This command replaces the entire table for
1903 * one station.
1904 *
1905 * NOTE: Station must already be in 4965 device's station table.
1906 * Use REPLY_ADD_STA.
1907 *
1908 * The rate scaling procedures described below work well. Of course, other
1909 * procedures are possible, and may work better for particular environments.
1910 *
1911 *
1912 * FILLING THE RATE TABLE
1913 *
1914 * Given a particular initial rate and mode, as determined by the rate
1915 * scaling algorithm described below, the Linux driver uses the following
1916 * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
1917 * Link Quality command:
1918 *
1919 *
1920 * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
1921 * a) Use this same initial rate for first 3 entries.
1922 * b) Find next lower available rate using same mode (SISO or MIMO),
1923 * use for next 3 entries. If no lower rate available, switch to
1924 * legacy mode (no HT40 channel, no MIMO, no short guard interval).
1925 * c) If using MIMO, set command's mimo_delimiter to number of entries
1926 * using MIMO (3 or 6).
1927 * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
1928 * no MIMO, no short guard interval), at the next lower bit rate
1929 * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
1930 * legacy procedure for remaining table entries.
1931 *
1932 * 2) If using legacy initial rate:
1933 * a) Use the initial rate for only one entry.
1934 * b) For each following entry, reduce the rate to next lower available
1935 * rate, until reaching the lowest available rate.
1936 * c) When reducing rate, also switch antenna selection.
1937 * d) Once lowest available rate is reached, repeat this rate until
1938 * rate table is filled (16 entries), switching antenna each entry.
1939 *
1940 *
1941 * ACCUMULATING HISTORY
1942 *
1943 * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
1944 * uses two sets of frame Tx success history: One for the current/active
1945 * modulation mode, and one for a speculative/search mode that is being
1946 * attempted. If the speculative mode turns out to be more effective (i.e.
1947 * actual transfer rate is better), then the driver continues to use the
1948 * speculative mode as the new current active mode.
1949 *
1950 * Each history set contains, separately for each possible rate, data for a
1951 * sliding window of the 62 most recent tx attempts at that rate. The data
1952 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1953 * and attempted frames, from which the driver can additionally calculate a
1954 * success ratio (success / attempted) and number of failures
1955 * (attempted - success), and control the size of the window (attempted).
1956 * The driver uses the bit map to remove successes from the success sum, as
1957 * the oldest tx attempts fall out of the window.
1958 *
1959 * When the 4965 device makes multiple tx attempts for a given frame, each
1960 * attempt might be at a different rate, and have different modulation
1961 * characteristics (e.g. antenna, fat channel, short guard interval), as set
1962 * up in the rate scaling table in the Link Quality command. The driver must
1963 * determine which rate table entry was used for each tx attempt, to determine
1964 * which rate-specific history to update, and record only those attempts that
1965 * match the modulation characteristics of the history set.
1966 *
1967 * When using block-ack (aggregation), all frames are transmitted at the same
1968 * rate, since there is no per-attempt acknowledgment from the destination
1969 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
1970 * rate_n_flags field. After receiving a block-ack, the driver can update
1971 * history for the entire block all at once.
1972 *
1973 *
1974 * FINDING BEST STARTING RATE:
1975 *
1976 * When working with a selected initial modulation mode (see below), the
1977 * driver attempts to find a best initial rate. The initial rate is the
1978 * first entry in the Link Quality command's rate table.
1979 *
1980 * 1) Calculate actual throughput (success ratio * expected throughput, see
1981 * table below) for current initial rate. Do this only if enough frames
1982 * have been attempted to make the value meaningful: at least 6 failed
1983 * tx attempts, or at least 8 successes. If not enough, don't try rate
1984 * scaling yet.
1985 *
1986 * 2) Find available rates adjacent to current initial rate. Available means:
1987 * a) supported by hardware &&
1988 * b) supported by association &&
1989 * c) within any constraints selected by user
1990 *
1991 * 3) Gather measured throughputs for adjacent rates. These might not have
1992 * enough history to calculate a throughput. That's okay, we might try
1993 * using one of them anyway!
1994 *
1995 * 4) Try decreasing rate if, for current rate:
1996 * a) success ratio is < 15% ||
1997 * b) lower adjacent rate has better measured throughput ||
1998 * c) higher adjacent rate has worse throughput, and lower is unmeasured
1999 *
2000 * As a sanity check, if decrease was determined above, leave rate
2001 * unchanged if:
2002 * a) lower rate unavailable
2003 * b) success ratio at current rate > 85% (very good)
2004 * c) current measured throughput is better than expected throughput
2005 * of lower rate (under perfect 100% tx conditions, see table below)
2006 *
2007 * 5) Try increasing rate if, for current rate:
2008 * a) success ratio is < 15% ||
2009 * b) both adjacent rates' throughputs are unmeasured (try it!) ||
2010 * c) higher adjacent rate has better measured throughput ||
2011 * d) lower adjacent rate has worse throughput, and higher is unmeasured
2012 *
2013 * As a sanity check, if increase was determined above, leave rate
2014 * unchanged if:
2015 * a) success ratio at current rate < 70%. This is not particularly
2016 * good performance; higher rate is sure to have poorer success.
2017 *
2018 * 6) Re-evaluate the rate after each tx frame. If working with block-
2019 * acknowledge, history and statistics may be calculated for the entire
2020 * block (including prior history that fits within the history windows),
2021 * before re-evaluation.
2022 *
2023 * FINDING BEST STARTING MODULATION MODE:
2024 *
2025 * After working with a modulation mode for a "while" (and doing rate scaling),
2026 * the driver searches for a new initial mode in an attempt to improve
2027 * throughput. The "while" is measured by numbers of attempted frames:
2028 *
2029 * For legacy mode, search for new mode after:
2030 * 480 successful frames, or 160 failed frames
2031 * For high-throughput modes (SISO or MIMO), search for new mode after:
2032 * 4500 successful frames, or 400 failed frames
2033 *
2034 * Mode switch possibilities are (3 for each mode):
2035 *
2036 * For legacy:
2037 * Change antenna, try SISO (if HT association), try MIMO (if HT association)
2038 * For SISO:
2039 * Change antenna, try MIMO, try shortened guard interval (SGI)
2040 * For MIMO:
2041 * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
2042 *
2043 * When trying a new mode, use the same bit rate as the old/current mode when
2044 * trying antenna switches and shortened guard interval. When switching to
2045 * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
2046 * for which the expected throughput (under perfect conditions) is about the
2047 * same or slightly better than the actual measured throughput delivered by
2048 * the old/current mode.
2049 *
2050 * Actual throughput can be estimated by multiplying the expected throughput
2051 * by the success ratio (successful / attempted tx frames). Frame size is
2052 * not considered in this calculation; it assumes that frame size will average
2053 * out to be fairly consistent over several samples. The following are
2054 * metric values for expected throughput assuming 100% success ratio.
2055 * Only G band has support for CCK rates:
2056 *
2057 * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
2058 *
2059 * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
2060 * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
2061 * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
2062 * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
2063 * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
2064 * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
2065 * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
2066 * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
2067 * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
2068 * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
2069 *
2070 * After the new mode has been tried for a short while (minimum of 6 failed
2071 * frames or 8 successful frames), compare success ratio and actual throughput
2072 * estimate of the new mode with the old. If either is better with the new
2073 * mode, continue to use the new mode.
2074 *
2075 * Continue comparing modes until all 3 possibilities have been tried.
2076 * If moving from legacy to HT, try all 3 possibilities from the new HT
2077 * mode. After trying all 3, a best mode is found. Continue to use this mode
2078 * for the longer "while" described above (e.g. 480 successful frames for
2079 * legacy), and then repeat the search process.
2080 *
2081 */
2082struct iwl_link_quality_cmd {
2083
2084 /* Index of destination/recipient station in uCode's station table */
2085 u8 sta_id;
2086 u8 reserved1;
2087 __le16 control; /* not used */
2088 struct iwl_link_qual_general_params general_params;
2089 struct iwl_link_qual_agg_params agg_params;
2090
2091 /*
2092 * Rate info; when using rate-scaling, Tx command's initial_rate_index
2093 * specifies 1st Tx rate attempted, via index into this table.
2094 * The 4965 device works its way through the table when retrying Tx.
2095 */
2096 struct {
2097 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
2098 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
2099 __le32 reserved2;
2100} __packed;
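
/*
 * Illustrative sketch (not part of the original header): the legacy-rate
 * procedure from "FILLING THE RATE TABLE" above expressed as code.  The
 * initial rate is used for the first entry only; each following entry steps
 * down to the next lower available rate until the lowest is reached, which
 * is then repeated until all LINK_QUAL_MAX_RETRY_NUM entries are filled.
 * 'next_lower_rate' is a hypothetical callback standing in for the driver's
 * rate-table walk (which also toggles the antenna bits of rate_n_flags, as
 * step 2c describes); the helper name is hypothetical as well.
 */
static inline void
example_fill_legacy_rs_table(struct iwl_link_quality_cmd *lq,
			     u32 initial_rate_n_flags,
			     u32 (*next_lower_rate)(u32 rate_n_flags))
{
	u32 rate = initial_rate_n_flags;
	int i;

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		lq->rs_table[i].rate_n_flags = cpu_to_le32(rate);
		rate = next_lower_rate(rate); /* returns same rate at bottom */
	}
}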
2101
2102/*
2103 * BT configuration enable flags:
2104 * bit 0 - 1: BT channel announcement enabled
2105 * 0: disable
2106 * bit 1 - 1: priority of BT device enabled
2107 * 0: disable
2108 */
2109#define BT_COEX_DISABLE (0x0)
2110#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
2111#define BT_ENABLE_PRIORITY BIT(1)
2112
2113#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
2114
2115#define BT_LEAD_TIME_DEF (0x1E)
2116
2117#define BT_MAX_KILL_DEF (0x5)
2118
2119/*
2120 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
2121 *
2122 * 3945 and 4965 devices support hardware handshake with Bluetooth device on
2123 * same platform. Bluetooth device alerts wireless device when it will Tx;
2124 * wireless device can delay or kill its own Tx to accommodate.
2125 */
2126struct iwl_bt_cmd {
2127 u8 flags;
2128 u8 lead_time;
2129 u8 max_kill;
2130 u8 reserved;
2131 __le32 kill_ack_mask;
2132 __le32 kill_cts_mask;
2133} __packed;
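
/*
 * Illustrative sketch (not part of the original header): a default
 * REPLY_BT_CONFIG payload built from the definitions above.  The helper
 * name is hypothetical; whether coexistence is enabled at all is a
 * driver/platform decision.
 */
static inline void example_default_bt_config(struct iwl_bt_cmd *bt)
{
	bt->flags = BT_COEX_ENABLE;	/* or BT_COEX_DISABLE */
	bt->lead_time = BT_LEAD_TIME_DEF;
	bt->max_kill = BT_MAX_KILL_DEF;
	bt->reserved = 0;
	bt->kill_ack_mask = cpu_to_le32(0);
	bt->kill_cts_mask = cpu_to_le32(0);
}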
2134
2135
2136/******************************************************************************
2137 * (6)
2138 * Spectrum Management (802.11h) Commands, Responses, Notifications:
2139 *
2140 *****************************************************************************/
2141
2142/*
2143 * Spectrum Management
2144 */
2145#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
2146 RXON_FILTER_CTL2HOST_MSK | \
2147 RXON_FILTER_ACCEPT_GRP_MSK | \
2148 RXON_FILTER_DIS_DECRYPT_MSK | \
2149 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
2150 RXON_FILTER_ASSOC_MSK | \
2151 RXON_FILTER_BCON_AWARE_MSK)
2152
2153struct iwl_measure_channel {
2154 __le32 duration; /* measurement duration in extended beacon
2155 * format */
2156 u8 channel; /* channel to measure */
2157 u8 type; /* see enum iwl_measure_type */
2158 __le16 reserved;
2159} __packed;
2160
2161/*
2162 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
2163 */
2164struct iwl_spectrum_cmd {
2165 __le16 len; /* number of bytes starting from token */
2166 u8 token; /* token id */
2167 u8 id; /* measurement id -- 0 or 1 */
2168 u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */
2169 u8 periodic; /* 1 = periodic */
2170 __le16 path_loss_timeout;
2171 __le32 start_time; /* start time in extended beacon format */
2172 __le32 reserved2;
2173 __le32 flags; /* rxon flags */
2174 __le32 filter_flags; /* rxon filter flags */
2175 __le16 channel_count; /* minimum 1, maximum 10 */
2176 __le16 reserved3;
2177 struct iwl_measure_channel channels[10];
2178} __packed;
2179
2180/*
2181 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
2182 */
2183struct iwl_spectrum_resp {
2184 u8 token;
2185 u8 id; /* id of the prior command replaced, or 0xff */
2186 __le16 status; /* 0 - command will be handled
2187 * 1 - cannot handle (conflicts with another
2188 * measurement) */
2189} __packed;
2190
2191enum iwl_measurement_state {
2192 IWL_MEASUREMENT_START = 0,
2193 IWL_MEASUREMENT_STOP = 1,
2194};
2195
2196enum iwl_measurement_status {
2197 IWL_MEASUREMENT_OK = 0,
2198 IWL_MEASUREMENT_CONCURRENT = 1,
2199 IWL_MEASUREMENT_CSA_CONFLICT = 2,
2200 IWL_MEASUREMENT_TGH_CONFLICT = 3,
2201 /* 4-5 reserved */
2202 IWL_MEASUREMENT_STOPPED = 6,
2203 IWL_MEASUREMENT_TIMEOUT = 7,
2204 IWL_MEASUREMENT_PERIODIC_FAILED = 8,
2205};
2206
2207#define NUM_ELEMENTS_IN_HISTOGRAM 8
2208
2209struct iwl_measurement_histogram {
2210 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2211 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2212} __packed;
2213
2214/* clear channel availability counters */
2215struct iwl_measurement_cca_counters {
2216 __le32 ofdm;
2217 __le32 cck;
2218} __packed;
2219
2220enum iwl_measure_type {
2221 IWL_MEASURE_BASIC = (1 << 0),
2222 IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
2223 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
2224 IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
2225 IWL_MEASURE_FRAME = (1 << 4),
2226 /* bits 5:6 are reserved */
2227 IWL_MEASURE_IDLE = (1 << 7),
2228};
2229
2230/*
2231 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
2232 */
2233struct iwl_spectrum_notification {
2234 u8 id; /* measurement id -- 0 or 1 */
2235 u8 token;
2236 u8 channel_index; /* index in measurement channel list */
2237 u8 state; /* 0 - start, 1 - stop */
2238 __le32 start_time; /* lower 32-bits of TSF */
2239 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
2240 u8 channel;
2241 u8 type; /* see enum iwl_measurement_type */
2242 u8 reserved1;
2243 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
2244 * valid if applicable for measurement type requested. */
2245 __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */
2246 __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */
2247 __le32 cca_time; /* channel load time in usecs */
2248 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
2249 * unidentified */
2250 u8 reserved2[3];
2251 struct iwl_measurement_histogram histogram;
2252 __le32 stop_time; /* lower 32-bits of TSF */
2253 __le32 status; /* see iwl_measurement_status */
2254} __packed;
2255
2256/******************************************************************************
2257 * (7)
2258 * Power Management Commands, Responses, Notifications:
2259 *
2260 *****************************************************************************/
2261
2262/**
2263 * struct iwl_powertable_cmd - Power Table Command
2264 * @flags: See below:
2265 *
2266 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
2267 *
2268 * PM allow:
2269 * bit 0 - '0' Driver does not allow power management
2270 * '1' Driver allows PM (use rest of parameters)
2271 *
2272 * uCode sends sleep notifications:
2273 * bit 1 - '0' Don't send sleep notification
2274 * '1' Send sleep notification (SEND_PM_NOTIFICATION)
2275 *
2276 * Sleep over DTIM:
2277 * bit 2 - '0' PM has to wake up every DTIM
2278 * '1' PM may sleep over DTIM until the listen interval.
2279 *
2280 * PCI power managed
2281 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2282 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2283 *
2284 * Fast PD
2285 * bit 4 - '1' Put radio to sleep when receiving frame for others
2286 *
2287 * Force sleep modes:
2288 * bit 31/30 - '00' use both MAC/xtal sleeps
2289 * '01' force MAC sleep
2290 * '10' force xtal sleep
2291 * '11' illegal setting
2292 *
2293 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
2294 * uCode assumes sleep over DTIM is allowed and we don't need to wake up
2295 * for every DTIM.
2296 */
2297#define IWL_POWER_VEC_SIZE 5
2298
2299#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2300#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
2301#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
2302#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
2303#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2304#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
2305#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
2306#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
2307#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
2308
2309struct iwl3945_powertable_cmd {
2310 __le16 flags;
2311 u8 reserved[2];
2312 __le32 rx_data_timeout;
2313 __le32 tx_data_timeout;
2314 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2315} __packed;
2316
2317struct iwl_powertable_cmd {
2318 __le16 flags;
2319 u8 keep_alive_seconds; /* 3945 reserved */
2320 u8 debug_flags; /* 3945 reserved */
2321 __le32 rx_data_timeout;
2322 __le32 tx_data_timeout;
2323 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2324 __le32 keep_alive_beacons;
2325} __packed;
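
/*
 * Illustrative sketch (not part of the original header): a minimal
 * POWER_TABLE_CMD that allows power management and lets the device sleep
 * over DTIM, per the flag bits described above.  The timeout and interval
 * values are placeholders rather than recommended settings, and the helper
 * name is hypothetical.
 */
static inline void example_fill_powertable(struct iwl_powertable_cmd *cmd)
{
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
		     IWL_POWER_SLEEP_OVER_DTIM_MSK;
	cmd->rx_data_timeout = cpu_to_le32(100 * 1000);	/* usec, placeholder */
	cmd->tx_data_timeout = cpu_to_le32(100 * 1000);	/* usec, placeholder */
	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
		cmd->sleep_interval[i] = cpu_to_le32(1 + i);	/* placeholder */
}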
2326
2327/*
2328 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
2329 * all devices identical.
2330 */
2331struct iwl_sleep_notification {
2332 u8 pm_sleep_mode;
2333 u8 pm_wakeup_src;
2334 __le16 reserved;
2335 __le32 sleep_time;
2336 __le32 tsf_low;
2337 __le32 bcon_timer;
2338} __packed;
2339
2340/* Sleep states. all devices identical. */
2341enum {
2342 IWL_PM_NO_SLEEP = 0,
2343 IWL_PM_SLP_MAC = 1,
2344 IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
2345 IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
2346 IWL_PM_SLP_PHY = 4,
2347 IWL_PM_SLP_REPENT = 5,
2348 IWL_PM_WAKEUP_BY_TIMER = 6,
2349 IWL_PM_WAKEUP_BY_DRIVER = 7,
2350 IWL_PM_WAKEUP_BY_RFKILL = 8,
2351 /* 3 reserved */
2352 IWL_PM_NUM_OF_MODES = 12,
2353};
2354
2355/*
2356 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
2357 */
2358struct iwl_card_state_notif {
2359 __le32 flags;
2360} __packed;
2361
2362#define HW_CARD_DISABLED 0x01
2363#define SW_CARD_DISABLED 0x02
2364#define CT_CARD_DISABLED 0x04
2365#define RXON_CARD_DISABLED 0x10
2366
2367struct iwl_ct_kill_config {
2368 __le32 reserved;
2369 __le32 critical_temperature_M;
2370 __le32 critical_temperature_R;
2371} __packed;
2372
2373/******************************************************************************
2374 * (8)
2375 * Scan Commands, Responses, Notifications:
2376 *
2377 *****************************************************************************/
2378
2379#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
2380#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2381
2382/**
2383 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
2384 *
2385 * One for each channel in the scan list.
2386 * Each channel can independently select:
2387 * 1) SSID for directed active scans
2388 * 2) Txpower setting (for rate specified within Tx command)
2389 * 3) How long to stay on-channel (behavior may be modified by quiet_time,
2390 * quiet_plcp_th, good_CRC_th)
2391 *
2392 * To avoid uCode errors, make sure the following are true (see comments
2393 * under struct iwl_scan_cmd about max_out_time and quiet_time):
2394 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2395 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2396 * 2) quiet_time <= active_dwell
2397 * 3) If restricting off-channel time (i.e. max_out_time !=0):
2398 * passive_dwell < max_out_time
2399 * active_dwell < max_out_time
2400 */
2401struct iwl3945_scan_channel {
2402 /*
2403 * type is defined as:
2404 * 0:0 1 = active, 0 = passive
2405 * 1:4 SSID direct bit map; if a bit is set, then corresponding
2406 * SSID IE is transmitted in probe request.
2407 * 5:7 reserved
2408 */
2409 u8 type;
2410 u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
2411 struct iwl3945_tx_power tpc;
2412 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2413 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2414} __packed;
2415
2416/* set number of direct probes u8 type */
2417#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2418
2419struct iwl_scan_channel {
2420 /*
2421 * type is defined as:
2422 * 0:0 1 = active, 0 = passive
2423 * 1:20 SSID direct bit map; if a bit is set, then corresponding
2424 * SSID IE is transmitted in probe request.
2425 * 21:31 reserved
2426 */
2427 __le32 type;
2428 __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
2429 u8 tx_gain; /* gain for analog radio */
2430 u8 dsp_atten; /* gain for DSP */
2431 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2432 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2433} __packed;
2434
2435/* set number of direct probes __le32 type */
2436#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
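
/*
 * Illustrative note (not part of the original header): IWL_SCAN_PROBE_MASK(n)
 * sets SSID-direct-map bits 1..n of the channel 'type' word, i.e. it requests
 * direct probes for the first n entries of direct_scan[].  For example,
 * IWL_SCAN_PROBE_MASK(2) == cpu_to_le32(0x6) (bits 1 and 2 set).
 */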
2437
2438/**
2439 * struct iwl_ssid_ie - directed scan network information element
2440 *
2441 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
2442 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
2443 * each channel may select different ssids from among the 20 (4) entries.
2444 * SSID IEs get transmitted in reverse order of entry.
2445 */
2446struct iwl_ssid_ie {
2447 u8 id;
2448 u8 len;
2449 u8 ssid[32];
2450} __packed;
2451
2452#define PROBE_OPTION_MAX_3945 4
2453#define PROBE_OPTION_MAX 20
2454#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2455#define IWL_GOOD_CRC_TH_DISABLED 0
2456#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2457#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2458#define IWL_MAX_SCAN_SIZE 1024
2459#define IWL_MAX_CMD_SIZE 4096
2460
2461/*
2462 * REPLY_SCAN_CMD = 0x80 (command)
2463 *
2464 * The hardware scan command is very powerful; the driver can set it up to
2465 * maintain (relatively) normal network traffic while doing a scan in the
2466 * background. The max_out_time and suspend_time control the ratio of how
2467 * long the device stays on an associated network channel ("service channel")
2468 * vs. how long it's away from the service channel, i.e. tuned to other channels
2469 * for scanning.
2470 *
2471 * max_out_time is the max time off-channel (in usec), and suspend_time
2472 * is how long (in "extended beacon" format) that the scan is "suspended"
2473 * after returning to the service channel. That is, suspend_time is the
2474 * time that we stay on the service channel, doing normal work, between
2475 * scan segments. The driver may set these parameters differently to support
2476 * scanning when associated vs. not associated, and light vs. heavy traffic
2477 * loads when associated.
2478 *
2479 * After receiving this command, the device's scan engine does the following:
2480 *
2481 * 1) Sends SCAN_START notification to driver
2482 * 2) Checks to see if it has time to do scan for one channel
2483 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
2484 * to tell AP that we're going off-channel
2485 * 4) Tunes to first channel in scan list, does active or passive scan
2486 * 5) Sends SCAN_RESULT notification to driver
2487 * 6) Checks to see if it has time to do scan on *next* channel in list
2488 * 7) Repeats 4-6 until it no longer has time to scan the next channel
2489 * before max_out_time expires
2490 * 8) Returns to service channel
2491 * 9) Sends NULL packet with PS=0 to tell AP that we're back
2492 * 10) Stays on service channel until suspend_time expires
2493 * 11) Repeats entire process 2-10 until list is complete
2494 * 12) Sends SCAN_COMPLETE notification
2495 *
2496 * For fast, efficient scans, the scan command also has support for staying on
2497 * a channel for just a short time, if doing active scanning and getting no
2498 * responses to the transmitted probe request. This time is controlled by
2499 * quiet_time, and the number of received packets below which a channel is
2500 * considered "quiet" is controlled by quiet_plcp_threshold.
2501 *
2502 * For active scanning on channels that have regulatory restrictions against
2503 * blindly transmitting, the scan can listen before transmitting, to make sure
2504 * that there is already legitimate activity on the channel. If enough
2505 * packets are cleanly received on the channel (controlled by good_CRC_th,
2506 * typical value 1), the scan engine starts transmitting probe requests.
2507 *
2508 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2509 *
2510 * To avoid uCode errors, see timing restrictions described under
2511 * struct iwl_scan_channel.
2512 */
2513
2514struct iwl3945_scan_cmd {
2515 __le16 len;
2516 u8 reserved0;
2517 u8 channel_count; /* # channels in channel list */
2518 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2519 * (only for active scan) */
2520 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2521 __le16 good_CRC_th; /* passive -> active promotion threshold */
2522 __le16 reserved1;
2523 __le32 max_out_time; /* max usec to be away from associated (service)
2524 * channel */
2525 __le32 suspend_time; /* pause scan this long (in "extended beacon
2526 * format") when returning to service channel:
2527 * 3945; 31:24 # beacons, 19:0 additional usec,
2528 * 4965; 31:22 # beacons, 21:0 additional usec.
2529 */
2530 __le32 flags; /* RXON_FLG_* */
2531 __le32 filter_flags; /* RXON_FILTER_* */
2532
2533 /* For active scans (set to all-0s for passive scans).
2534 * Does not include payload. Must specify Tx rate; no rate scaling. */
2535 struct iwl3945_tx_cmd tx_cmd;
2536
2537 /* For directed active scans (set to all-0s otherwise) */
2538 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
2539
2540 /*
2541 * Probe request frame, followed by channel list.
2542 *
2543 * Size of probe request frame is specified by byte count in tx_cmd.
2544 * Channel list follows immediately after probe request frame.
2545 * Number of channels in list is specified by channel_count.
2546 * Each channel in list is of type:
2547 *
2548 * struct iwl3945_scan_channel channels[0];
2549 *
2550 * NOTE: Only one band of channels can be scanned per pass. You
2551 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2552 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2553 * before requesting another scan.
2554 */
2555 u8 data[0];
2556} __packed;
2557
2558struct iwl_scan_cmd {
2559 __le16 len;
2560 u8 reserved0;
2561 u8 channel_count; /* # channels in channel list */
2562 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2563 * (only for active scan) */
2564 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2565 __le16 good_CRC_th; /* passive -> active promotion threshold */
2566 __le16 rx_chain; /* RXON_RX_CHAIN_* */
2567 __le32 max_out_time; /* max usec to be away from associated (service)
2568 * channel */
2569 __le32 suspend_time; /* pause scan this long (in "extended beacon
2570 * format") when returning to service chnl:
2571 * 3945; 31:24 # beacons, 19:0 additional usec,
2572 * 4965; 31:22 # beacons, 21:0 additional usec.
2573 */
2574 __le32 flags; /* RXON_FLG_* */
2575 __le32 filter_flags; /* RXON_FILTER_* */
2576
2577 /* For active scans (set to all-0s for passive scans).
2578 * Does not include payload. Must specify Tx rate; no rate scaling. */
2579 struct iwl_tx_cmd tx_cmd;
2580
2581 /* For directed active scans (set to all-0s otherwise) */
2582 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
2583
2584 /*
2585 * Probe request frame, followed by channel list.
2586 *
2587 * Size of probe request frame is specified by byte count in tx_cmd.
2588 * Channel list follows immediately after probe request frame.
2589 * Number of channels in list is specified by channel_count.
2590 * Each channel in list is of type:
2591 *
2592 * struct iwl_scan_channel channels[0];
2593 *
2594 * NOTE: Only one band of channels can be scanned per pass. You
2595 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2596 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2597 * before requesting another scan.
2598 */
2599 u8 data[0];
2600} __packed;
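
/*
 * Illustrative sketch (not part of the original header): laying out the
 * variable-length tail of REPLY_SCAN_CMD as the comments above describe --
 * probe request frame first (its size recorded in tx_cmd.len), then the
 * channel list, with the total command size in 'len'.  The caller is assumed
 * to have cleared an IWL_MAX_SCAN_SIZE buffer; helper and parameter names
 * are hypothetical.
 */
static inline struct iwl_scan_channel *
example_scan_cmd_layout(struct iwl_scan_cmd *scan, u16 probe_req_len,
			u8 n_channels)
{
	struct iwl_scan_channel *chan;

	scan->tx_cmd.len = cpu_to_le16(probe_req_len);
	scan->channel_count = n_channels;

	/* Channel list starts right after the probe request frame. */
	chan = (struct iwl_scan_channel *)(scan->data + probe_req_len);

	scan->len = cpu_to_le16(sizeof(*scan) + probe_req_len +
				n_channels * sizeof(*chan));
	return chan;
}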
2601
2602/* If a scan can be aborted, the abort is reported via the complete notification with abort status. */
2603#define CAN_ABORT_STATUS cpu_to_le32(0x1)
2604/* complete notification statuses */
2605#define ABORT_STATUS 0x2
2606
2607/*
2608 * REPLY_SCAN_CMD = 0x80 (response)
2609 */
2610struct iwl_scanreq_notification {
2611 __le32 status; /* 1: okay, 2: cannot fulfill request */
2612} __packed;
2613
2614/*
2615 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
2616 */
2617struct iwl_scanstart_notification {
2618 __le32 tsf_low;
2619 __le32 tsf_high;
2620 __le32 beacon_timer;
2621 u8 channel;
2622 u8 band;
2623 u8 reserved[2];
2624 __le32 status;
2625} __packed;
2626
2627#define SCAN_OWNER_STATUS 0x1
2628#define MEASURE_OWNER_STATUS 0x2
2629
2630#define IWL_PROBE_STATUS_OK 0
2631#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
2632/* error statuses combined with TX_FAILED */
2633#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
2634#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
2635
2636#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
2637/*
2638 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
2639 */
2640struct iwl_scanresults_notification {
2641 u8 channel;
2642 u8 band;
2643 u8 probe_status;
2644 u8 num_probe_not_sent; /* not enough time to send */
2645 __le32 tsf_low;
2646 __le32 tsf_high;
2647 __le32 statistics[NUMBER_OF_STATISTICS];
2648} __packed;
2649
2650/*
2651 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
2652 */
2653struct iwl_scancomplete_notification {
2654 u8 scanned_channels;
2655 u8 status;
2656 u8 last_channel;
2657 __le32 tsf_low;
2658 __le32 tsf_high;
2659} __packed;
2660
2661
2662/******************************************************************************
2663 * (9)
2664 * IBSS/AP Commands and Notifications:
2665 *
2666 *****************************************************************************/
2667
2668enum iwl_ibss_manager {
2669 IWL_NOT_IBSS_MANAGER = 0,
2670 IWL_IBSS_MANAGER = 1,
2671};
2672
2673/*
2674 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
2675 */
2676
2677struct iwl3945_beacon_notif {
2678 struct iwl3945_tx_resp beacon_notify_hdr;
2679 __le32 low_tsf;
2680 __le32 high_tsf;
2681 __le32 ibss_mgr_status;
2682} __packed;
2683
2684struct iwl4965_beacon_notif {
2685 struct iwl4965_tx_resp beacon_notify_hdr;
2686 __le32 low_tsf;
2687 __le32 high_tsf;
2688 __le32 ibss_mgr_status;
2689} __packed;
2690
2691/*
2692 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2693 */
2694
2695struct iwl3945_tx_beacon_cmd {
2696 struct iwl3945_tx_cmd tx;
2697 __le16 tim_idx;
2698 u8 tim_size;
2699 u8 reserved1;
2700 struct ieee80211_hdr frame[0]; /* beacon frame */
2701} __packed;
2702
2703struct iwl_tx_beacon_cmd {
2704 struct iwl_tx_cmd tx;
2705 __le16 tim_idx;
2706 u8 tim_size;
2707 u8 reserved1;
2708 struct ieee80211_hdr frame[0]; /* beacon frame */
2709} __packed;
2710
2711/******************************************************************************
2712 * (10)
2713 * Statistics Commands and Notifications:
2714 *
2715 *****************************************************************************/
2716
2717#define IWL_TEMP_CONVERT 260
2718
2719#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
2720#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
2721#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
2722
2723/* Used for passing to driver number of successes and failures per rate */
2724struct rate_histogram {
2725 union {
2726 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2727 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2728 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2729 } success;
2730 union {
2731 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2732 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2733 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2734 } failed;
2735} __packed;
2736
2737/* statistics command response */
2738
2739struct iwl39_statistics_rx_phy {
2740 __le32 ina_cnt;
2741 __le32 fina_cnt;
2742 __le32 plcp_err;
2743 __le32 crc32_err;
2744 __le32 overrun_err;
2745 __le32 early_overrun_err;
2746 __le32 crc32_good;
2747 __le32 false_alarm_cnt;
2748 __le32 fina_sync_err_cnt;
2749 __le32 sfd_timeout;
2750 __le32 fina_timeout;
2751 __le32 unresponded_rts;
2752 __le32 rxe_frame_limit_overrun;
2753 __le32 sent_ack_cnt;
2754 __le32 sent_cts_cnt;
2755} __packed;
2756
2757struct iwl39_statistics_rx_non_phy {
2758 __le32 bogus_cts; /* CTS received when not expecting CTS */
2759 __le32 bogus_ack; /* ACK received when not expecting ACK */
2760 __le32 non_bssid_frames; /* number of frames with BSSID that
2761 * doesn't belong to the STA BSSID */
2762 __le32 filtered_frames; /* count frames that were dumped in the
2763 * filtering process */
2764 __le32 non_channel_beacons; /* beacons with our bss id but not on
2765 * our serving channel */
2766} __packed;
2767
2768struct iwl39_statistics_rx {
2769 struct iwl39_statistics_rx_phy ofdm;
2770 struct iwl39_statistics_rx_phy cck;
2771 struct iwl39_statistics_rx_non_phy general;
2772} __packed;
2773
2774struct iwl39_statistics_tx {
2775 __le32 preamble_cnt;
2776 __le32 rx_detected_cnt;
2777 __le32 bt_prio_defer_cnt;
2778 __le32 bt_prio_kill_cnt;
2779 __le32 few_bytes_cnt;
2780 __le32 cts_timeout;
2781 __le32 ack_timeout;
2782 __le32 expected_ack_cnt;
2783 __le32 actual_ack_cnt;
2784} __packed;
2785
2786struct statistics_dbg {
2787 __le32 burst_check;
2788 __le32 burst_count;
2789 __le32 wait_for_silence_timeout_cnt;
2790 __le32 reserved[3];
2791} __packed;
2792
2793struct iwl39_statistics_div {
2794 __le32 tx_on_a;
2795 __le32 tx_on_b;
2796 __le32 exec_time;
2797 __le32 probe_time;
2798} __packed;
2799
2800struct iwl39_statistics_general {
2801 __le32 temperature;
2802 struct statistics_dbg dbg;
2803 __le32 sleep_time;
2804 __le32 slots_out;
2805 __le32 slots_idle;
2806 __le32 ttl_timestamp;
2807 struct iwl39_statistics_div div;
2808} __packed;
2809
2810struct statistics_rx_phy {
2811 __le32 ina_cnt;
2812 __le32 fina_cnt;
2813 __le32 plcp_err;
2814 __le32 crc32_err;
2815 __le32 overrun_err;
2816 __le32 early_overrun_err;
2817 __le32 crc32_good;
2818 __le32 false_alarm_cnt;
2819 __le32 fina_sync_err_cnt;
2820 __le32 sfd_timeout;
2821 __le32 fina_timeout;
2822 __le32 unresponded_rts;
2823 __le32 rxe_frame_limit_overrun;
2824 __le32 sent_ack_cnt;
2825 __le32 sent_cts_cnt;
2826 __le32 sent_ba_rsp_cnt;
2827 __le32 dsp_self_kill;
2828 __le32 mh_format_err;
2829 __le32 re_acq_main_rssi_sum;
2830 __le32 reserved3;
2831} __packed;
2832
2833struct statistics_rx_ht_phy {
2834 __le32 plcp_err;
2835 __le32 overrun_err;
2836 __le32 early_overrun_err;
2837 __le32 crc32_good;
2838 __le32 crc32_err;
2839 __le32 mh_format_err;
2840 __le32 agg_crc32_good;
2841 __le32 agg_mpdu_cnt;
2842 __le32 agg_cnt;
2843 __le32 unsupport_mcs;
2844} __packed;
2845
2846#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
2847
2848struct statistics_rx_non_phy {
2849 __le32 bogus_cts; /* CTS received when not expecting CTS */
2850 __le32 bogus_ack; /* ACK received when not expecting ACK */
2851 __le32 non_bssid_frames; /* number of frames with BSSID that
2852 * doesn't belong to the STA BSSID */
2853 __le32 filtered_frames; /* count frames that were dumped in the
2854 * filtering process */
2855 __le32 non_channel_beacons; /* beacons with our bss id but not on
2856 * our serving channel */
2857 __le32 channel_beacons; /* beacons with our bss id and in our
2858 * serving channel */
2859 __le32 num_missed_bcon; /* number of missed beacons */
2860 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
2861 * ADC was in saturation */
2862 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
2863 * for INA */
2864 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
2865 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
2866 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
2867 __le32 interference_data_flag; /* flag for interference data
2868 * availability. 1 when data is
2869 * available. */
2870 __le32 channel_load; /* counts RX Enable time in uSec */
2871 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
2872 * and CCK) counter */
2873 __le32 beacon_rssi_a;
2874 __le32 beacon_rssi_b;
2875 __le32 beacon_rssi_c;
2876 __le32 beacon_energy_a;
2877 __le32 beacon_energy_b;
2878 __le32 beacon_energy_c;
2879} __packed;
2880
2881struct statistics_rx {
2882 struct statistics_rx_phy ofdm;
2883 struct statistics_rx_phy cck;
2884 struct statistics_rx_non_phy general;
2885 struct statistics_rx_ht_phy ofdm_ht;
2886} __packed;
2887
2888/**
2889 * struct statistics_tx_power - current tx power
2890 *
2891 * @ant_a: current tx power on chain a in 1/2 dB step
2892 * @ant_b: current tx power on chain b in 1/2 dB step
2893 * @ant_c: current tx power on chain c in 1/2 dB step
2894 */
2895struct statistics_tx_power {
2896 u8 ant_a;
2897 u8 ant_b;
2898 u8 ant_c;
2899 u8 reserved;
2900} __packed;
2901
2902struct statistics_tx_non_phy_agg {
2903 __le32 ba_timeout;
2904 __le32 ba_reschedule_frames;
2905 __le32 scd_query_agg_frame_cnt;
2906 __le32 scd_query_no_agg;
2907 __le32 scd_query_agg;
2908 __le32 scd_query_mismatch;
2909 __le32 frame_not_ready;
2910 __le32 underrun;
2911 __le32 bt_prio_kill;
2912 __le32 rx_ba_rsp_cnt;
2913} __packed;
2914
2915struct statistics_tx {
2916 __le32 preamble_cnt;
2917 __le32 rx_detected_cnt;
2918 __le32 bt_prio_defer_cnt;
2919 __le32 bt_prio_kill_cnt;
2920 __le32 few_bytes_cnt;
2921 __le32 cts_timeout;
2922 __le32 ack_timeout;
2923 __le32 expected_ack_cnt;
2924 __le32 actual_ack_cnt;
2925 __le32 dump_msdu_cnt;
2926 __le32 burst_abort_next_frame_mismatch_cnt;
2927 __le32 burst_abort_missing_next_frame_cnt;
2928 __le32 cts_timeout_collision;
2929 __le32 ack_or_ba_timeout_collision;
2930 struct statistics_tx_non_phy_agg agg;
2931
2932 __le32 reserved1;
2933} __packed;
2934
2935
2936struct statistics_div {
2937 __le32 tx_on_a;
2938 __le32 tx_on_b;
2939 __le32 exec_time;
2940 __le32 probe_time;
2941 __le32 reserved1;
2942 __le32 reserved2;
2943} __packed;
2944
2945struct statistics_general_common {
2946 __le32 temperature; /* radio temperature */
2947 struct statistics_dbg dbg;
2948 __le32 sleep_time;
2949 __le32 slots_out;
2950 __le32 slots_idle;
2951 __le32 ttl_timestamp;
2952 struct statistics_div div;
2953 __le32 rx_enable_counter;
2954 /*
2955 * num_of_sos_states:
2956 * count the number of times we have to re-tune
2957 * in order to get out of bad PHY status
2958 */
2959 __le32 num_of_sos_states;
2960} __packed;
2961
2962struct statistics_general {
2963 struct statistics_general_common common;
2964 __le32 reserved2;
2965 __le32 reserved3;
2966} __packed;
2967
2968#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
2969#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
2970#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
2971
2972/*
2973 * REPLY_STATISTICS_CMD = 0x9c,
2974 * all devices identical.
2975 *
2976 * This command triggers an immediate response containing uCode statistics.
2977 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
2978 *
2979 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2980 * internal copy of the statistics (counters) after issuing the response.
2981 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
2982 *
2983 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2984 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
2985 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
2986 */
2987#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2988#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
2989struct iwl_statistics_cmd {
2990 __le32 configuration_flags; /* IWL_STATS_CONF_* */
2991} __packed;
2992
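/*
 * Editor's note -- illustrative sketch, not part of the original patch:
 * how the configuration flags above are used to build a REPLY_STATISTICS_CMD
 * payload. This mirrors what iwl_legacy_send_statistics_request() in
 * iwl-core.c (added by this same patch) does; the function name below is
 * hypothetical and the example is excluded from the build.
 */
#if 0	/* example only */
static int example_request_statistics(struct iwl_priv *priv, bool clear)
{
	struct iwl_statistics_cmd cmd = {
		/* optionally ask uCode to zero its counters after replying */
		.configuration_flags = clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
	};

	return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
				       sizeof(cmd), &cmd);
}
#endif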
2993/*
2994 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
2995 *
2996 * By default, uCode issues this notification after receiving a beacon
2997 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2998 * REPLY_STATISTICS_CMD 0x9c, above.
2999 *
3000 * Statistics counters continue to increment beacon after beacon, but are
3001 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
3002 * 0x9c with CLEAR_STATS bit set (see above).
3003 *
3004 * uCode also issues this notification during scans. uCode clears statistics
3005 * appropriately so that each notification contains statistics for only the
3006 * one channel that has just been scanned.
3007 */
3008#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
3009#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
3010
3011struct iwl3945_notif_statistics {
3012 __le32 flag;
3013 struct iwl39_statistics_rx rx;
3014 struct iwl39_statistics_tx tx;
3015 struct iwl39_statistics_general general;
3016} __packed;
3017
3018struct iwl_notif_statistics {
3019 __le32 flag;
3020 struct statistics_rx rx;
3021 struct statistics_tx tx;
3022 struct statistics_general general;
3023} __packed;
3024
3025/*
3026 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
3027 *
3028 * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
3029 * missed beacons, regardless of how many were missed. When the driver
3030 * receives the notification, the payload carries all of the beacon
3031 * information: the total number of missed beacons, the number of consecutive
3032 * missed beacons, the number of beacons received, and the number of beacons
3033 * expected to be received.
3034 *
3035 * If uCode detects consecutive_missed_beacons > 5, it will reset the radio
3036 * in order to bring the radio/PHY back to a working state; this has no
3037 * relation to when the driver performs sensitivity calibration.
3038 *
3039 * The driver should set its own missed_beacon_threshold to decide when to
3040 * perform sensitivity calibration based on the number of consecutive
3041 * missed beacons, to improve overall performance in noisy environments.
3042 *
3043 */
3044
3045#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
3046#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
3047#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
3048
3049struct iwl_missed_beacon_notif {
3050 __le32 consecutive_missed_beacons;
3051 __le32 total_missed_becons;
3052 __le32 num_expected_beacons;
3053 __le32 num_recvd_beacons;
3054} __packed;
3055
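/*
 * Editor's note -- illustrative sketch, not part of the original patch:
 * acting on the notification documented above by comparing the consecutive
 * missed beacon count against the driver's threshold (default
 * IWL_MISSED_BEACON_THRESHOLD_DEF). The helper name is hypothetical and the
 * example is excluded from the build; the real handling lives in the
 * 3945/4965 specific code.
 */
#if 0	/* example only */
static bool example_should_recalibrate(const struct iwl_missed_beacon_notif *notif,
				       u32 missed_beacon_threshold)
{
	/* too many beacons missed in a row -> redo sensitivity calibration */
	return le32_to_cpu(notif->consecutive_missed_beacons) >=
	       missed_beacon_threshold;
}
#endif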
3056
3057/******************************************************************************
3058 * (11)
3059 * Rx Calibration Commands:
3060 *
3061 * With the uCode used for open source drivers, most Tx calibration (except
3062 * for Tx Power) and most Rx calibration is done by uCode during the
3063 * "initialize" phase of uCode boot. Driver must calibrate only:
3064 *
3065 * 1) Tx power (depends on temperature), described elsewhere
3066 * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
3067 * 3) Receiver sensitivity (to optimize signal detection)
3068 *
3069 *****************************************************************************/
3070
3071/**
3072 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
3073 *
3074 * This command sets up the Rx signal detector for a sensitivity level that
3075 * is high enough to lock onto all signals within the associated network,
3076 * but low enough to ignore signals that are below a certain threshold, so as
3077 * not to have too many "false alarms". False alarms are signals that the
3078 * Rx DSP tries to lock onto, but then discards after determining that they
3079 * are noise.
3080 *
3081 * The optimum number of false alarms is between 5 and 50 per 200 TUs
3082 * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
3083 * time listening, not transmitting). Driver must adjust sensitivity so that
3084 * the ratio of actual false alarms to actual Rx time falls within this range.
3085 *
3086 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
3087 * received beacon. These provide information to the driver to analyze the
3088 * sensitivity. Don't analyze statistics that come in from scanning, or any
3089 * other non-associated-network source. Pertinent statistics include:
3090 *
3091 * From "general" statistics (struct statistics_rx_non_phy):
3092 *
3093 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
3094 * Measure of energy of desired signal. Used for establishing a level
3095 * below which the device does not detect signals.
3096 *
3097 * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
3098 * Measure of background noise in silent period after beacon.
3099 *
3100 * channel_load
3101 * uSecs of actual Rx time during beacon period (varies according to
3102 * how much time was spent transmitting).
3103 *
3104 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
3105 *
3106 * false_alarm_cnt
3107 * Signal locks abandoned early (before phy-level header).
3108 *
3109 * plcp_err
3110 * Signal locks abandoned late (during phy-level header).
3111 *
3112 * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
3113 * beacon to beacon, i.e. each value is an accumulation of all errors
3114 * before and including the latest beacon. Values will wrap around to 0
3115 * after counting up to 2^32 - 1. Driver must differentiate vs.
3116 * previous beacon's values to determine # false alarms in the current
3117 * beacon period.
3118 *
3119 * Total number of false alarms = false_alarms + plcp_errs
3120 *
3121 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
3122 * (notice that the start points for OFDM are at or close to settings for
3123 * maximum sensitivity):
3124 *
3125 * START / MIN / MAX
3126 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
3127 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
3128 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
3129 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
3130 *
3131 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
3132 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
3133 * by *adding* 1 to all 4 of the table entries above, up to the max for
3134 * each entry. Conversely, if false alarm rate is too low (less than 5
3135 * for each 204.8 msecs listening), *subtract* 1 from each entry to
3136 * increase sensitivity.
3137 *
3138 * For CCK sensitivity, keep track of the following:
3139 *
3140 * 1). 20-beacon history of maximum background noise, indicated by
3141 * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
3142 * 3 receivers. For any given beacon, the "silence reference" is
3143 * the maximum of last 60 samples (20 beacons * 3 receivers).
3144 *
3145 * 2). 10-beacon history of strongest signal level, as indicated
3146 * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
3147 * i.e. the strength of the signal through the best receiver at the
3148 * moment. These measurements are "upside down", with lower values
3149 * for stronger signals, so max energy will be *minimum* value.
3150 *
3151 * Then for any given beacon, the driver must determine the *weakest*
3152 * of the strongest signals; this is the minimum level that needs to be
3153 * successfully detected, when using the best receiver at the moment.
3154 * "Max cck energy" is the maximum (higher value means lower energy!)
3155 * of the last 10 minima. Once this is determined, driver must add
3156 * a little margin by adding "6" to it.
3157 *
3158 * 3). Number of consecutive beacon periods with too few false alarms.
3159 * Reset this to 0 at the first beacon period that falls within the
3160 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
3161 *
3162 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
3163 * (notice that the start points for CCK are at maximum sensitivity):
3164 *
3165 * START / MIN / MAX
3166 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
3167 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
3168 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
3169 *
3170 * If actual rate of CCK false alarms (+ plcp_errors) is too high
3171 * (greater than 50 for each 204.8 msecs listening), method for reducing
3172 * sensitivity is:
3173 *
3174 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3175 * up to max 400.
3176 *
3177 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
3178 * sensitivity has been reduced a significant amount; bring it up to
3179 * a moderate 161. Otherwise, *add* 3, up to max 200.
3180 *
3181 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
3182 * sensitivity has been reduced only a moderate or small amount;
3183 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
3184 * down to min 0. Otherwise (if gain has been significantly reduced),
3185 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
3186 *
3187 * b) Save a snapshot of the "silence reference".
3188 *
3189 * If actual rate of CCK false alarms (+ plcp_errors) is too low
3190 * (less than 5 for each 204.8 msecs listening), method for increasing
3191 * sensitivity is used only if:
3192 *
3193 * 1a) Previous beacon did not have too many false alarms
3194 * 1b) AND difference between previous "silence reference" and current
3195 * "silence reference" (prev - current) is 2 or more,
3196 * OR 2) 100 or more consecutive beacon periods have had rate of
3197 * less than 5 false alarms per 204.8 milliseconds rx time.
3198 *
3199 * Method for increasing sensitivity:
3200 *
3201 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
3202 * down to min 125.
3203 *
3204 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3205 * down to min 200.
3206 *
3207 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
3208 *
3209 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3210 * (between 5 and 50 for each 204.8 msecs listening):
3211 *
3212 * 1) Save a snapshot of the silence reference.
3213 *
3214 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3215 * give some extra margin to energy threshold by *subtracting* 8
3216 * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
3217 *
3218 * For all cases (too few, too many, good range), make sure that the CCK
3219 * detection threshold (energy) is below the energy level for robust
3220 * detection over the past 10 beacon periods, the "Max cck energy".
3221 * Lower values mean higher energy; this means making sure that the value
3222 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3223 *
3224 */
3225
3226/*
3227 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
3228 */
3229#define HD_TABLE_SIZE (11) /* number of entries */
3230#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
3231#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
3232#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
3233#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
3234#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
3235#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
3236#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
3237#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
3238#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
3239#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
3240#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
3241
3242/* Control field in struct iwl_sensitivity_cmd */
3243#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
3244#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
3245
3246/**
3247 * struct iwl_sensitivity_cmd
3248 * @control: (1) updates working table, (0) updates default table
3249 * @table: energy threshold values, use HD_* as index into table
3250 *
3251 * Always use "1" in "control" to update uCode's working table and DSP.
3252 */
3253struct iwl_sensitivity_cmd {
3254 __le16 control; /* always use "1" */
3255 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
3256} __packed;
3257
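/*
 * Editor's note -- illustrative sketch, not part of the original patch:
 * the OFDM half of the adjustment algorithm documented above. The per-beacon
 * false alarm count is obtained by differencing the monotonically increasing
 * counters (unsigned subtraction also handles the 2^32 wrap), then the four
 * OFDM entries of a host-side working copy of the table are nudged by +/-1
 * within the documented START/MIN/MAX bounds. Normalization to actual Rx
 * time (channel_load) and the CCK half are omitted for brevity; the real
 * logic lives in iwl-4965-calib.c. The helper names and the "prev_*"
 * bookkeeping are assumptions made for the example, which is excluded from
 * the build.
 */
#if 0	/* example only */
static u16 example_clamp(int v, u16 min, u16 max)
{
	if (v < min)
		return min;
	if (v > max)
		return max;
	return v;
}

static void example_adjust_ofdm_sensitivity(u16 *tbl, u32 prev_fa,
					    u32 prev_plcp,
					    const struct statistics_rx_phy *ofdm)
{
	/* counters accumulate beacon to beacon; the delta is this period's */
	u32 false_alarms = (le32_to_cpu(ofdm->false_alarm_cnt) - prev_fa) +
			   (le32_to_cpu(ofdm->plcp_err) - prev_plcp);
	int step;

	if (false_alarms > 50)		/* too many: reduce sensitivity */
		step = 1;
	else if (false_alarms < 5)	/* too few: increase sensitivity */
		step = -1;
	else
		return;			/* already in the good range */

	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = example_clamp(
		tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] + step, 85, 120);
	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = example_clamp(
		tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] + step, 170, 210);
	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = example_clamp(
		tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] + step, 105, 140);
	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = example_clamp(
		tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] + step, 220, 270);
}
#endif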
3258
3259/**
3260 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
3261 *
3262 * This command sets the relative gains of the 4965 device's 3 radio receiver chains.
3263 *
3264 * After the first association, driver should accumulate signal and noise
3265 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
3266 * beacons from the associated network (don't collect statistics that come
3267 * in from scanning, or any other non-network source).
3268 *
3269 * DISCONNECTED ANTENNA:
3270 *
3271 * Driver should determine which antennas are actually connected, by comparing
3272 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3273 * following values over 20 beacons, one accumulator for each of the chains
3274 * a/b/c, from struct statistics_rx_non_phy:
3275 *
3276 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3277 *
3278 * Find the strongest signal from among a/b/c. Compare the other two to the
3279 * strongest. If any signal is more than 15 dB (times 20, unless you
3280 * divide the accumulated values by 20) below the strongest, the driver
3281 * considers that antenna to be disconnected, and should not try to use that
3282 * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
3283 * driver should declare the stronger one as connected, and attempt to use it
3284 * (A and B are the only 2 Tx chains!).
3285 *
3286 *
3287 * RX BALANCE:
3288 *
3289 * Driver should balance the 3 receivers (but just the ones that are connected
3290 * to antennas, see above) for gain, by comparing the average signal levels
3291 * detected during the silence after each beacon (background noise).
3292 * Accumulate (add) the following values over 20 beacons, one accumulator for
3293 * each of the chains a/b/c, from struct statistics_rx_non_phy:
3294 *
3295 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3296 *
3297 * Find the weakest background noise level from among a/b/c. This Rx chain
3298 * will be the reference, with 0 gain adjustment. Attenuate the other chains by
3299 * finding noise difference:
3300 *
3301 * (accum_noise[i] - accum_noise[reference]) / 30
3302 *
3303 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3304 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
3305 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3306 * and set bit 2 to indicate "reduce gain". The value for the reference
3307 * (weakest) chain should be "0".
3308 *
3309 * diff_gain_[abc] bit fields:
3310 * 2: (1) reduce gain, (0) increase gain
3311 * 1-0: amount of gain, units of 1.5 dB
3312 */
3313
3314/* Phy calibration command for 4965 series */
3315/* The default calibrate table size if not specified by firmware */
3316#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3317enum {
3318 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3319 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3320};
3321
3322#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3323
3324struct iwl_calib_hdr {
3325 u8 op_code;
3326 u8 first_group;
3327 u8 groups_num;
3328 u8 data_valid;
3329} __packed;
3330
3331/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
3332struct iwl_calib_diff_gain_cmd {
3333 struct iwl_calib_hdr hdr;
3334 s8 diff_gain_a; /* see above */
3335 s8 diff_gain_b;
3336 s8 diff_gain_c;
3337 u8 reserved1;
3338} __packed;
3339
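/*
 * Editor's note -- illustrative sketch, not part of the original patch:
 * the RX BALANCE arithmetic documented above. The 20-beacon noise
 * accumulators for chains a/b/c are compared, the quietest chain becomes the
 * 0-gain reference, and the others are attenuated by (difference / 30),
 * i.e. 1.5 dB steps, clamped to 0..3 with bit 2 set to mean "reduce gain".
 * The accumulator array is an assumption made for the example, which is
 * excluded from the build; the real code lives in iwl-4965-calib.c.
 */
#if 0	/* example only */
static void example_fill_diff_gain(struct iwl_calib_diff_gain_cmd *cmd,
				   const u32 accum_noise[3])
{
	s8 gain[3];
	int ref = 0, i;

	/* quietest chain (lowest accumulated silence RSSI) is the reference */
	for (i = 1; i < 3; i++)
		if (accum_noise[i] < accum_noise[ref])
			ref = i;

	for (i = 0; i < 3; i++) {
		u32 delta = (accum_noise[i] - accum_noise[ref]) / 30;

		if (delta > 3)
			delta = 3;
		/* bit 2 set = reduce gain; the reference chain stays at 0 */
		gain[i] = delta ? (delta | (1 << 2)) : 0;
	}

	cmd->hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
	cmd->diff_gain_a = gain[0];
	cmd->diff_gain_b = gain[1];
	cmd->diff_gain_c = gain[2];
}
#endif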
3340/******************************************************************************
3341 * (12)
3342 * Miscellaneous Commands:
3343 *
3344 *****************************************************************************/
3345
3346/*
3347 * LEDs Command & Response
3348 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
3349 *
3350 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3351 * this command turns it on or off, or sets up a periodic blinking cycle.
3352 */
3353struct iwl_led_cmd {
3354 __le32 interval; /* "interval" in uSec */
3355 u8 id; /* 1: Activity, 2: Link, 3: Tech */
3356 u8 off; /* # intervals off while blinking;
3357 * "0", with >0 "on" value, turns LED on */
3358 u8 on; /* # intervals on while blinking;
3359 * "0", regardless of "off", turns LED off */
3360 u8 reserved;
3361} __packed;
3362
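/*
 * Editor's note -- illustrative sketch, not part of the original patch:
 * filling the REPLY_LEDS_CMD payload above for a blink cycle on the
 * Activity LED. "interval" is the time unit in uSec and "on"/"off" count
 * intervals, so with a 10 ms unit the LED below is on ~100 ms and off
 * ~900 ms per cycle. The helper name is hypothetical and the example is
 * excluded from the build.
 */
#if 0	/* example only */
static void example_blink_activity_led(struct iwl_led_cmd *cmd)
{
	cmd->interval = cpu_to_le32(10000);	/* 10 ms time unit */
	cmd->id = 1;				/* 1: Activity LED */
	cmd->on = 10;				/* ~100 ms on ... */
	cmd->off = 90;				/* ... ~900 ms off */
	cmd->reserved = 0;
}
#endif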
3363
3364/******************************************************************************
3365 * (13)
3366 * Union of all expected notifications/responses:
3367 *
3368 *****************************************************************************/
3369
3370struct iwl_rx_packet {
3371 /*
3372 * The first 4 bytes of the RX frame header contain both the RX frame
3373 * size and some flags.
3374 * Bit fields:
3375 * 31: flag flush RB request
3376 * 30: flag ignore TC (terminal counter) request
3377 * 29: flag fast IRQ request
3378 * 28-14: Reserved
3379 * 13-00: RX frame size
3380 */
3381 __le32 len_n_flags;
3382 struct iwl_cmd_header hdr;
3383 union {
3384 struct iwl3945_rx_frame rx_frame;
3385 struct iwl3945_tx_resp tx_resp;
3386 struct iwl3945_beacon_notif beacon_status;
3387
3388 struct iwl_alive_resp alive_frame;
3389 struct iwl_spectrum_notification spectrum_notif;
3390 struct iwl_csa_notification csa_notif;
3391 struct iwl_error_resp err_resp;
3392 struct iwl_card_state_notif card_state_notif;
3393 struct iwl_add_sta_resp add_sta;
3394 struct iwl_rem_sta_resp rem_sta;
3395 struct iwl_sleep_notification sleep_notif;
3396 struct iwl_spectrum_resp spectrum;
3397 struct iwl_notif_statistics stats;
3398 struct iwl_compressed_ba_resp compressed_ba;
3399 struct iwl_missed_beacon_notif missed_beacon;
3400 __le32 status;
3401 u8 raw[0];
3402 } u;
3403} __packed;
3404
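/*
 * Editor's note -- illustrative sketch, not part of the original patch:
 * per the bit layout documented in iwl_rx_packet above, the RX frame size
 * occupies bits 13..0 of len_n_flags (the driver masks it with
 * FH_RSCSR_FRAME_SIZE_MSK, see iwl-core.c). The example is excluded from
 * the build.
 */
#if 0	/* example only */
static inline u32 example_rx_frame_size(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & 0x3fff;	/* bits 13-0 */
}
#endif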
3405#endif /* __iwl_legacy_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
new file mode 100644
index 000000000000..c95c3bcb724d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -0,0 +1,2668 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-dev.h"
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-power.h"
42#include "iwl-sta.h"
43#include "iwl-helpers.h"
44
45
46MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/*
52 * If bt_coex_active is true, uCode will kill/defer WiFi transmissions
53 * every time the priority line is asserted (BT is sending signals on the
54 * priority line in the PCIx).
55 * If bt_coex_active is false, uCode will ignore BT activity and
56 * operate normally.
57 *
58 * Users might experience transmit issues on some platforms due to this
59 * WiFi/BT co-existence problem. The possible symptoms are:
60 * - able to scan and find all available APs
61 * - not able to associate with any AP
62 * On those platforms, WiFi communication can be restored by setting
63 * the "bt_coex_active" module parameter to "false"
64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */
67bool bt_coex_active = true;
68EXPORT_SYMBOL_GPL(bt_coex_active);
69module_param(bt_coex_active, bool, S_IRUGO);
70MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
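/*
 * Editor's note -- illustrative usage, not part of the original patch: on
 * platforms showing the WiFi/BT co-existence problem described above, the
 * parameter can be cleared at module load time, e.g.
 *
 *	modprobe iwl_legacy bt_coex_active=0
 *
 * (module name per the commit message; adjust to the module actually in
 * use). Since the permission is S_IRUGO, the current value can only be read
 * back via /sys/module/.../parameters/bt_coex_active, not changed at
 * runtime.
 */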
71
72u32 iwl_debug_level;
73EXPORT_SYMBOL(iwl_debug_level);
74
75const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
76EXPORT_SYMBOL(iwl_bcast_addr);
77
78
79/* This function both allocates and initializes hw and priv. */
80struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
81{
82 struct iwl_priv *priv;
83 /* mac80211 allocates memory for this device instance, including
84 * space for this driver's private structure */
85 struct ieee80211_hw *hw;
86
87 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
88 cfg->ops->ieee80211_ops);
89 if (hw == NULL) {
90 pr_err("%s: Can not allocate network device\n",
91 cfg->name);
92 goto out;
93 }
94
95 priv = hw->priv;
96 priv->hw = hw;
97
98out:
99 return hw;
100}
101EXPORT_SYMBOL(iwl_legacy_alloc_all);
102
103#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
104#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
105static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
106 struct ieee80211_sta_ht_cap *ht_info,
107 enum ieee80211_band band)
108{
109 u16 max_bit_rate = 0;
110 u8 rx_chains_num = priv->hw_params.rx_chains_num;
111 u8 tx_chains_num = priv->hw_params.tx_chains_num;
112
113 ht_info->cap = 0;
114 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
115
116 ht_info->ht_supported = true;
117
118 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
119 max_bit_rate = MAX_BIT_RATE_20_MHZ;
120 if (priv->hw_params.ht40_channel & BIT(band)) {
121 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
122 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
123 ht_info->mcs.rx_mask[4] = 0x01;
124 max_bit_rate = MAX_BIT_RATE_40_MHZ;
125 }
126
127 if (priv->cfg->mod_params->amsdu_size_8K)
128 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
129
130 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
131 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
132
133 ht_info->mcs.rx_mask[0] = 0xFF;
134 if (rx_chains_num >= 2)
135 ht_info->mcs.rx_mask[1] = 0xFF;
136 if (rx_chains_num >= 3)
137 ht_info->mcs.rx_mask[2] = 0xFF;
138
139 /* Highest supported Rx data rate */
140 max_bit_rate *= rx_chains_num;
141 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
142 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
143
144 /* Tx MCS capabilities */
145 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
146 if (tx_chains_num != rx_chains_num) {
147 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
148 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
149 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
150 }
151}
152
153/**
154 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on EEPROM
155 */
156int iwl_legacy_init_geos(struct iwl_priv *priv)
157{
158 struct iwl_channel_info *ch;
159 struct ieee80211_supported_band *sband;
160 struct ieee80211_channel *channels;
161 struct ieee80211_channel *geo_ch;
162 struct ieee80211_rate *rates;
163 int i = 0;
164
165 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
166 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
167 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
168 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
169 return 0;
170 }
171
172 channels = kzalloc(sizeof(struct ieee80211_channel) *
173 priv->channel_count, GFP_KERNEL);
174 if (!channels)
175 return -ENOMEM;
176
177 rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
178 GFP_KERNEL);
179 if (!rates) {
180 kfree(channels);
181 return -ENOMEM;
182 }
183
184 /* 5.2GHz channels start after the 2.4GHz channels */
185 sband = &priv->bands[IEEE80211_BAND_5GHZ];
186 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
187 /* just OFDM */
188 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
189 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
190
191 if (priv->cfg->sku & IWL_SKU_N)
192 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
193 IEEE80211_BAND_5GHZ);
194
195 sband = &priv->bands[IEEE80211_BAND_2GHZ];
196 sband->channels = channels;
197 /* OFDM & CCK */
198 sband->bitrates = rates;
199 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
200
201 if (priv->cfg->sku & IWL_SKU_N)
202 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
203 IEEE80211_BAND_2GHZ);
204
205 priv->ieee_channels = channels;
206 priv->ieee_rates = rates;
207
208 for (i = 0; i < priv->channel_count; i++) {
209 ch = &priv->channel_info[i];
210
211 if (!iwl_legacy_is_channel_valid(ch))
212 continue;
213
214 if (iwl_legacy_is_channel_a_band(ch))
215 sband = &priv->bands[IEEE80211_BAND_5GHZ];
216 else
217 sband = &priv->bands[IEEE80211_BAND_2GHZ];
218
219 geo_ch = &sband->channels[sband->n_channels++];
220
221 geo_ch->center_freq =
222 ieee80211_channel_to_frequency(ch->channel, ch->band);
223 geo_ch->max_power = ch->max_power_avg;
224 geo_ch->max_antenna_gain = 0xff;
225 geo_ch->hw_value = ch->channel;
226
227 if (iwl_legacy_is_channel_valid(ch)) {
228 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
229 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
230
231 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
232 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
233
234 if (ch->flags & EEPROM_CHANNEL_RADAR)
235 geo_ch->flags |= IEEE80211_CHAN_RADAR;
236
237 geo_ch->flags |= ch->ht40_extension_channel;
238
239 if (ch->max_power_avg > priv->tx_power_device_lmt)
240 priv->tx_power_device_lmt = ch->max_power_avg;
241 } else {
242 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
243 }
244
245 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
246 ch->channel, geo_ch->center_freq,
247 iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
248 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
249 "restricted" : "valid",
250 geo_ch->flags);
251 }
252
253 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
254 priv->cfg->sku & IWL_SKU_A) {
255 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
256 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
257 priv->pci_dev->device,
258 priv->pci_dev->subsystem_device);
259 priv->cfg->sku &= ~IWL_SKU_A;
260 }
261
262 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
263 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
264 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
265
266 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
267
268 return 0;
269}
270EXPORT_SYMBOL(iwl_legacy_init_geos);
271
272/*
273 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
274 */
275void iwl_legacy_free_geos(struct iwl_priv *priv)
276{
277 kfree(priv->ieee_channels);
278 kfree(priv->ieee_rates);
279 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
280}
281EXPORT_SYMBOL(iwl_legacy_free_geos);
282
283static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
284 enum ieee80211_band band,
285 u16 channel, u8 extension_chan_offset)
286{
287 const struct iwl_channel_info *ch_info;
288
289 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
290 if (!iwl_legacy_is_channel_valid(ch_info))
291 return false;
292
293 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
294 return !(ch_info->ht40_extension_channel &
295 IEEE80211_CHAN_NO_HT40PLUS);
296 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
297 return !(ch_info->ht40_extension_channel &
298 IEEE80211_CHAN_NO_HT40MINUS);
299
300 return false;
301}
302
303bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
304 struct iwl_rxon_context *ctx,
305 struct ieee80211_sta_ht_cap *ht_cap)
306{
307 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
308 return false;
309
310 /*
311 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
312	 * because the bit will not be set in the pure 40MHz case
313 */
314 if (ht_cap && !ht_cap->ht_supported)
315 return false;
316
317#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
318 if (priv->disable_ht40)
319 return false;
320#endif
321
322 return iwl_legacy_is_channel_extension(priv, priv->band,
323 le16_to_cpu(ctx->staging.channel),
324 ctx->ht.extension_chan_offset);
325}
326EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
327
328static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
329{
330 u16 new_val;
331 u16 beacon_factor;
332
333 /*
334 * If mac80211 hasn't given us a beacon interval, program
335 * the default into the device.
336 */
337 if (!beacon_val)
338 return DEFAULT_BEACON_INTERVAL;
339
340 /*
341 * If the beacon interval we obtained from the peer
342 * is too large, we'll have to wake up more often
343 * (and in IBSS case, we'll beacon too much)
344 *
345 * For example, if max_beacon_val is 4096, and the
346 * requested beacon interval is 7000, we'll have to
347 * use 3500 to be able to wake up on the beacons.
348 *
349 * This could badly influence beacon detection stats.
350 */
351
352 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
353 new_val = beacon_val / beacon_factor;
354
355 if (!new_val)
356 new_val = max_beacon_val;
357
358 return new_val;
359}
360
361int
362iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
363{
364 u64 tsf;
365 s32 interval_tm, rem;
366 struct ieee80211_conf *conf = NULL;
367 u16 beacon_int;
368 struct ieee80211_vif *vif = ctx->vif;
369
370 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
371
372 lockdep_assert_held(&priv->mutex);
373
374 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
375
376 ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
377 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
378
379 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
380
381 /*
382 * TODO: For IBSS we need to get atim_window from mac80211,
383 * for now just always use 0
384 */
385 ctx->timing.atim_window = 0;
386
387 beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
388 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
389 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
390
391	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
392 interval_tm = beacon_int * TIME_UNIT;
393 rem = do_div(tsf, interval_tm);
394 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
395
396 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
397
398 IWL_DEBUG_ASSOC(priv,
399 "beacon interval %d beacon timer %d beacon tim %d\n",
400 le16_to_cpu(ctx->timing.beacon_interval),
401 le32_to_cpu(ctx->timing.beacon_init_val),
402 le16_to_cpu(ctx->timing.atim_window));
403
404 return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
405 sizeof(ctx->timing), &ctx->timing);
406}
407EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
408
409void
410iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
411 struct iwl_rxon_context *ctx,
412 int hw_decrypt)
413{
414 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
415
416 if (hw_decrypt)
417 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
418 else
419 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
420
421}
422EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
423
424/* validate RXON structure is valid */
425int
426iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
427{
428 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
429 bool error = false;
430
431 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
432 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
433 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
434 error = true;
435 }
436 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
437 IWL_WARN(priv, "check 2.4G: wrong radar\n");
438 error = true;
439 }
440 } else {
441 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
442 IWL_WARN(priv, "check 5.2G: not short slot!\n");
443 error = true;
444 }
445 if (rxon->flags & RXON_FLG_CCK_MSK) {
446 IWL_WARN(priv, "check 5.2G: CCK!\n");
447 error = true;
448 }
449 }
450 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
451 IWL_WARN(priv, "mac/bssid mcast!\n");
452 error = true;
453 }
454
455 /* make sure basic rates 6Mbps and 1Mbps are supported */
456 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
457 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
458 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
459 error = true;
460 }
461
462 if (le16_to_cpu(rxon->assoc_id) > 2007) {
463 IWL_WARN(priv, "aid > 2007\n");
464 error = true;
465 }
466
467 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
468 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
469 IWL_WARN(priv, "CCK and short slot\n");
470 error = true;
471 }
472
473 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
474 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
475 IWL_WARN(priv, "CCK and auto detect");
476 error = true;
477 }
478
479 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
480 RXON_FLG_TGG_PROTECT_MSK)) ==
481 RXON_FLG_TGG_PROTECT_MSK) {
482 IWL_WARN(priv, "TGg but no auto-detect\n");
483 error = true;
484 }
485
486 if (error)
487 IWL_WARN(priv, "Tuning to channel %d\n",
488 le16_to_cpu(rxon->channel));
489
490 if (error) {
491 IWL_ERR(priv, "Invalid RXON\n");
492 return -EINVAL;
493 }
494 return 0;
495}
496EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
497
498/**
499 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
500 * @priv: staging_rxon is compared to active_rxon
501 *
502 * If the RXON structure is changing enough to require a new tune,
503 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
504 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
505 */
506int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
507 struct iwl_rxon_context *ctx)
508{
509 const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
510 const struct iwl_legacy_rxon_cmd *active = &ctx->active;
511
512#define CHK(cond) \
513 if ((cond)) { \
514 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
515 return 1; \
516 }
517
518#define CHK_NEQ(c1, c2) \
519 if ((c1) != (c2)) { \
520 IWL_DEBUG_INFO(priv, "need full RXON - " \
521 #c1 " != " #c2 " - %d != %d\n", \
522 (c1), (c2)); \
523 return 1; \
524 }
525
526 /* These items are only settable from the full RXON command */
527 CHK(!iwl_legacy_is_associated_ctx(ctx));
528 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
529 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
530 CHK(compare_ether_addr(staging->wlap_bssid_addr,
531 active->wlap_bssid_addr));
532 CHK_NEQ(staging->dev_type, active->dev_type);
533 CHK_NEQ(staging->channel, active->channel);
534 CHK_NEQ(staging->air_propagation, active->air_propagation);
535 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
536 active->ofdm_ht_single_stream_basic_rates);
537 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
538 active->ofdm_ht_dual_stream_basic_rates);
539 CHK_NEQ(staging->assoc_id, active->assoc_id);
540
541 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
542 * be updated with the RXON_ASSOC command -- however only some
543 * flag transitions are allowed using RXON_ASSOC */
544
545 /* Check if we are not switching bands */
546 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
547 active->flags & RXON_FLG_BAND_24G_MSK);
548
549 /* Check if we are switching association toggle */
550 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
551 active->filter_flags & RXON_FILTER_ASSOC_MSK);
552
553#undef CHK
554#undef CHK_NEQ
555
556 return 0;
557}
558EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
559
560u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
561 struct iwl_rxon_context *ctx)
562{
563 /*
564 * Assign the lowest rate -- should really get this from
565 * the beacon skb from mac80211.
566 */
567 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
568 return IWL_RATE_1M_PLCP;
569 else
570 return IWL_RATE_6M_PLCP;
571}
572EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
573
574static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
575 struct iwl_ht_config *ht_conf,
576 struct iwl_rxon_context *ctx)
577{
578 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
579
580 if (!ctx->ht.enabled) {
581 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
582 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
583 RXON_FLG_HT40_PROT_MSK |
584 RXON_FLG_HT_PROT_MSK);
585 return;
586 }
587
588 rxon->flags |= cpu_to_le32(ctx->ht.protection <<
589 RXON_FLG_HT_OPERATING_MODE_POS);
590
591 /* Set up channel bandwidth:
592 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
593	/* clear the HT channel mode before setting the mode */
594 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
595 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
596 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
597 /* pure ht40 */
598 if (ctx->ht.protection ==
599 IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
600 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
601 /* Note: control channel is opposite of extension channel */
602 switch (ctx->ht.extension_chan_offset) {
603 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
604 rxon->flags &=
605 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
606 break;
607 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
608 rxon->flags |=
609 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
610 break;
611 }
612 } else {
613 /* Note: control channel is opposite of extension channel */
614 switch (ctx->ht.extension_chan_offset) {
615 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
616 rxon->flags &=
617 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
618 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
619 break;
620 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
621 rxon->flags |=
622 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
623 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
624 break;
625 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
626 default:
627 /* channel location only valid if in Mixed mode */
628 IWL_ERR(priv,
629 "invalid extension channel offset\n");
630 break;
631 }
632 }
633 } else {
634 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
635 }
636
637 if (priv->cfg->ops->hcmd->set_rxon_chain)
638 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
639
640 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
641 "extension channel offset 0x%x\n",
642 le32_to_cpu(rxon->flags), ctx->ht.protection,
643 ctx->ht.extension_chan_offset);
644}
645
646void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
647{
648 struct iwl_rxon_context *ctx;
649
650 for_each_context(priv, ctx)
651 _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
652}
653EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
654
655/* Return a valid, unused channel for a passive scan to reset the RF */
656u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
657 enum ieee80211_band band)
658{
659 const struct iwl_channel_info *ch_info;
660 int i;
661 u8 channel = 0;
662 u8 min, max;
663 struct iwl_rxon_context *ctx;
664
665 if (band == IEEE80211_BAND_5GHZ) {
666 min = 14;
667 max = priv->channel_count;
668 } else {
669 min = 0;
670 max = 14;
671 }
672
673 for (i = min; i < max; i++) {
674 bool busy = false;
675
676 for_each_context(priv, ctx) {
677 busy = priv->channel_info[i].channel ==
678 le16_to_cpu(ctx->staging.channel);
679 if (busy)
680 break;
681 }
682
683 if (busy)
684 continue;
685
686 channel = priv->channel_info[i].channel;
687 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
688 if (iwl_legacy_is_channel_valid(ch_info))
689 break;
690 }
691
692 return channel;
693}
694EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
695
696/**
697 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
698 * @ch: requested channel as a pointer to struct ieee80211_channel
699 *
700 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
701 * in the staging RXON flag structure based on the ch->band
702 */
703int
704iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
705 struct iwl_rxon_context *ctx)
706{
707 enum ieee80211_band band = ch->band;
708 u16 channel = ch->hw_value;
709
710 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
711 (priv->band == band))
712 return 0;
713
714 ctx->staging.channel = cpu_to_le16(channel);
715 if (band == IEEE80211_BAND_5GHZ)
716 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
717 else
718 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
719
720 priv->band = band;
721
722 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
723
724 return 0;
725}
726EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
727
728void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
729 struct iwl_rxon_context *ctx,
730 enum ieee80211_band band,
731 struct ieee80211_vif *vif)
732{
733 if (band == IEEE80211_BAND_5GHZ) {
734 ctx->staging.flags &=
735 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
736 | RXON_FLG_CCK_MSK);
737 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
738 } else {
739 /* Copied from iwl_post_associate() */
740 if (vif && vif->bss_conf.use_short_slot)
741 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
742 else
743 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
744
745 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
746 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
747 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
748 }
749}
750EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
751
752/*
753 * initialize rxon structure with default values from eeprom
754 */
755void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
756 struct iwl_rxon_context *ctx)
757{
758 const struct iwl_channel_info *ch_info;
759
760 memset(&ctx->staging, 0, sizeof(ctx->staging));
761
762 if (!ctx->vif) {
763 ctx->staging.dev_type = ctx->unused_devtype;
764 } else
765 switch (ctx->vif->type) {
766
767 case NL80211_IFTYPE_STATION:
768 ctx->staging.dev_type = ctx->station_devtype;
769 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
770 break;
771
772 case NL80211_IFTYPE_ADHOC:
773 ctx->staging.dev_type = ctx->ibss_devtype;
774 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
775 ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
776 RXON_FILTER_ACCEPT_GRP_MSK;
777 break;
778
779 default:
780 IWL_ERR(priv, "Unsupported interface type %d\n",
781 ctx->vif->type);
782 break;
783 }
784
785#if 0
786 /* TODO: Figure out when short_preamble would be set and cache from
787 * that */
788 if (!hw_to_local(priv->hw)->short_preamble)
789 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
790 else
791 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
792#endif
793
794 ch_info = iwl_legacy_get_channel_info(priv, priv->band,
795 le16_to_cpu(ctx->active.channel));
796
797 if (!ch_info)
798 ch_info = &priv->channel_info[0];
799
800 ctx->staging.channel = cpu_to_le16(ch_info->channel);
801 priv->band = ch_info->band;
802
803 iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
804
805 ctx->staging.ofdm_basic_rates =
806 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
807 ctx->staging.cck_basic_rates =
808 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
809
810 /* clear both MIX and PURE40 mode flag */
811 ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
812 RXON_FLG_CHANNEL_MODE_PURE_40);
813 if (ctx->vif)
814 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
815
816 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
817 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
818}
819EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
820
821void iwl_legacy_set_rate(struct iwl_priv *priv)
822{
823 const struct ieee80211_supported_band *hw = NULL;
824 struct ieee80211_rate *rate;
825 struct iwl_rxon_context *ctx;
826 int i;
827
828 hw = iwl_get_hw_mode(priv, priv->band);
829 if (!hw) {
830 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
831 return;
832 }
833
834 priv->active_rate = 0;
835
836 for (i = 0; i < hw->n_bitrates; i++) {
837 rate = &(hw->bitrates[i]);
838 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
839 priv->active_rate |= (1 << rate->hw_value);
840 }
841
842 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
843
844 for_each_context(priv, ctx) {
845 ctx->staging.cck_basic_rates =
846 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
847
848 ctx->staging.ofdm_basic_rates =
849 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
850 }
851}
852EXPORT_SYMBOL(iwl_legacy_set_rate);
853
854void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
855{
856 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
857
858 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
859 return;
860
861 if (priv->switch_rxon.switch_in_progress) {
862 ieee80211_chswitch_done(ctx->vif, is_success);
863 mutex_lock(&priv->mutex);
864 priv->switch_rxon.switch_in_progress = false;
865 mutex_unlock(&priv->mutex);
866 }
867}
868EXPORT_SYMBOL(iwl_legacy_chswitch_done);
869
870void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
871{
872 struct iwl_rx_packet *pkt = rxb_addr(rxb);
873 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
874
875 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
876 struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
877
878 if (priv->switch_rxon.switch_in_progress) {
879 if (!le32_to_cpu(csa->status) &&
880 (csa->channel == priv->switch_rxon.channel)) {
881 rxon->channel = csa->channel;
882 ctx->staging.channel = csa->channel;
883 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
884 le16_to_cpu(csa->channel));
885 iwl_legacy_chswitch_done(priv, true);
886 } else {
887 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
888 le16_to_cpu(csa->channel));
889 iwl_legacy_chswitch_done(priv, false);
890 }
891 }
892}
893EXPORT_SYMBOL(iwl_legacy_rx_csa);
894
895#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
896void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
897 struct iwl_rxon_context *ctx)
898{
899 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
900
901 IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
902 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
903 IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
904 le16_to_cpu(rxon->channel));
905 IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
906 IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
907 le32_to_cpu(rxon->filter_flags));
908 IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
909 IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
910 rxon->ofdm_basic_rates);
911 IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
912 rxon->cck_basic_rates);
913 IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
914 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
915 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
916 le16_to_cpu(rxon->assoc_id));
917}
918EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
919#endif
920/**
921 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
922 */
923void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
924{
925 /* Set the FW error flag -- cleared on iwl_down */
926 set_bit(STATUS_FW_ERROR, &priv->status);
927
928 /* Cancel currently queued command. */
929 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
930
931 IWL_ERR(priv, "Loaded firmware version: %s\n",
932 priv->hw->wiphy->fw_version);
933
934 priv->cfg->ops->lib->dump_nic_error_log(priv);
935 if (priv->cfg->ops->lib->dump_fh)
936 priv->cfg->ops->lib->dump_fh(priv, NULL, false);
937 priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
938#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
939 if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
940 iwl_legacy_print_rx_config_cmd(priv,
941 &priv->contexts[IWL_RXON_CTX_BSS]);
942#endif
943
944 wake_up_interruptible(&priv->wait_command_queue);
945
946 /* Keep the restart process from trying to send host
947	 * commands by clearing the STATUS_READY bit */
948 clear_bit(STATUS_READY, &priv->status);
949
950 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
951 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
952 "Restarting adapter due to uCode error.\n");
953
954 if (priv->cfg->mod_params->restart_fw)
955 queue_work(priv->workqueue, &priv->restart);
956 }
957}
958EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
959
960static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
961{
962 int ret = 0;
963
964 /* stop device's busmaster DMA activity */
965 iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
966
967 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
968 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
969 if (ret)
970 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
971
972 IWL_DEBUG_INFO(priv, "stop master\n");
973
974 return ret;
975}
976
977void iwl_legacy_apm_stop(struct iwl_priv *priv)
978{
979 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
980
981 /* Stop device's DMA activity */
982 iwl_legacy_apm_stop_master(priv);
983
984 /* Reset the entire device */
985 iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
986
987 udelay(10);
988
989 /*
990 * Clear "initialization complete" bit to move adapter from
991 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
992 */
993 iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
994 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
995}
996EXPORT_SYMBOL(iwl_legacy_apm_stop);
997
998
999/*
1000 * Start up NIC's basic functionality after it has been reset
1001 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
1002 * NOTE: This does not load uCode nor start the embedded processor
1003 */
1004int iwl_legacy_apm_init(struct iwl_priv *priv)
1005{
1006 int ret = 0;
1007 u16 lctl;
1008
1009 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1010
1011 /*
1012 * Use "set_bit" below rather than "write", to preserve any hardware
1013 * bits already set by default after reset.
1014 */
1015
1016 /* Disable L0S exit timer (platform NMI Work/Around) */
1017 iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1018 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1019
1020 /*
1021 * Disable L0s without affecting L1;
1022 * don't wait for ICH L0s (ICH bug W/A)
1023 */
1024 iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1025 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1026
1027 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1028 iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
1029 CSR_DBG_HPET_MEM_REG_VAL);
1030
1031 /*
1032 * Enable HAP INTA (interrupt from management bus) to
1033 * wake device's PCI Express link L1a -> L0s
1034	 * NOTE: This is a no-op for 3945 (non-existent bit)
1035 */
1036 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1037 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1038
1039 /*
1040 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1041 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1042 * If so (likely), disable L0S, so device moves directly L0->L1;
1043 * costs negligible amount of power savings.
1044 * If not (unlikely), enable L0S, so there is at least some
1045 * power savings, even without L1.
1046 */
1047 if (priv->cfg->base_params->set_l0s) {
1048 lctl = iwl_legacy_pcie_link_ctl(priv);
1049 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1050 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1051 /* L1-ASPM enabled; disable(!) L0S */
1052 iwl_legacy_set_bit(priv, CSR_GIO_REG,
1053 CSR_GIO_REG_VAL_L0S_ENABLED);
1054 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1055 } else {
1056 /* L1-ASPM disabled; enable(!) L0S */
1057 iwl_legacy_clear_bit(priv, CSR_GIO_REG,
1058 CSR_GIO_REG_VAL_L0S_ENABLED);
1059 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1060 }
1061 }
1062
1063 /* Configure analog phase-lock-loop before activating to D0A */
1064 if (priv->cfg->base_params->pll_cfg_val)
1065 iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
1066 priv->cfg->base_params->pll_cfg_val);
1067
1068 /*
1069 * Set "initialization complete" bit to move adapter from
1070 * D0U* --> D0A* (powered-up active) state.
1071 */
1072 iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1073
1074 /*
1075 * Wait for clock stabilization; once stabilized, access to
1076 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
1077 * and accesses to uCode SRAM.
1078 */
1079 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1080 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1081 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1082 if (ret < 0) {
1083 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1084 goto out;
1085 }
1086
1087 /*
1088 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1089	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
1090 *
1091 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1092 * do not disable clocks. This preserves any hardware bits already
1093 * set by default in "CLK_CTRL_REG" after reset.
1094 */
1095 if (priv->cfg->base_params->use_bsm)
1096 iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
1097 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1098 else
1099 iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
1100 APMG_CLK_VAL_DMA_CLK_RQT);
1101 udelay(20);
1102
1103 /* Disable L1-Active */
1104 iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1105 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1106
1107out:
1108 return ret;
1109}
1110EXPORT_SYMBOL(iwl_legacy_apm_init);
1111
1112
1113int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1114{
1115 int ret;
1116 s8 prev_tx_power;
1117
1118 lockdep_assert_held(&priv->mutex);
1119
1120 if (priv->tx_power_user_lmt == tx_power && !force)
1121 return 0;
1122
1123 if (!priv->cfg->ops->lib->send_tx_power)
1124 return -EOPNOTSUPP;
1125
1126 if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
1127 IWL_WARN(priv,
1128 "Requested user TXPOWER %d below lower limit %d.\n",
1129 tx_power,
1130 IWL4965_TX_POWER_TARGET_POWER_MIN);
1131 return -EINVAL;
1132 }
1133
1134 if (tx_power > priv->tx_power_device_lmt) {
1135 IWL_WARN(priv,
1136 "Requested user TXPOWER %d above upper limit %d.\n",
1137 tx_power, priv->tx_power_device_lmt);
1138 return -EINVAL;
1139 }
1140
1141 if (!iwl_legacy_is_ready_rf(priv))
1142 return -EIO;
1143
1144	/* scan complete uses tx_power_next, so it needs to be updated */
1145 priv->tx_power_next = tx_power;
1146 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
1147 IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
1148 return 0;
1149 }
1150
1151 prev_tx_power = priv->tx_power_user_lmt;
1152 priv->tx_power_user_lmt = tx_power;
1153
1154 ret = priv->cfg->ops->lib->send_tx_power(priv);
1155
1156	/* if setting tx_power fails, restore the original tx power */
1157 if (ret) {
1158 priv->tx_power_user_lmt = prev_tx_power;
1159 priv->tx_power_next = prev_tx_power;
1160 }
1161 return ret;
1162}
1163EXPORT_SYMBOL(iwl_legacy_set_tx_power);
1164
1165void iwl_legacy_send_bt_config(struct iwl_priv *priv)
1166{
1167 struct iwl_bt_cmd bt_cmd = {
1168 .lead_time = BT_LEAD_TIME_DEF,
1169 .max_kill = BT_MAX_KILL_DEF,
1170 .kill_ack_mask = 0,
1171 .kill_cts_mask = 0,
1172 };
1173
1174 if (!bt_coex_active)
1175 bt_cmd.flags = BT_COEX_DISABLE;
1176 else
1177 bt_cmd.flags = BT_COEX_ENABLE;
1178
1179 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1180 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1181
1182 if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1183 sizeof(struct iwl_bt_cmd), &bt_cmd))
1184 IWL_ERR(priv, "failed to send BT Coex Config\n");
1185}
1186EXPORT_SYMBOL(iwl_legacy_send_bt_config);
1187
1188int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1189{
1190 struct iwl_statistics_cmd statistics_cmd = {
1191 .configuration_flags =
1192 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1193 };
1194
1195 if (flags & CMD_ASYNC)
1196 return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1197 sizeof(struct iwl_statistics_cmd),
1198 &statistics_cmd, NULL);
1199 else
1200 return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1201 sizeof(struct iwl_statistics_cmd),
1202 &statistics_cmd);
1203}
1204EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
1205
1206void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
1207 struct iwl_rx_mem_buffer *rxb)
1208{
1209#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1210 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1211 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
1212 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
1213 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1214#endif
1215}
1216EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
1217
1218void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1219 struct iwl_rx_mem_buffer *rxb)
1220{
1221 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1222 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1223 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1224 "notification for %s:\n", len,
1225 iwl_legacy_get_cmd_string(pkt->hdr.cmd));
1226 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1227}
1228EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
1229
1230void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
1231 struct iwl_rx_mem_buffer *rxb)
1232{
1233 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1234
1235 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
1236 "seq 0x%04X ser 0x%08X\n",
1237 le32_to_cpu(pkt->u.err_resp.error_type),
1238 iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
1239 pkt->u.err_resp.cmd_id,
1240 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
1241 le32_to_cpu(pkt->u.err_resp.error_info));
1242}
1243EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
1244
1245void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
1246{
1247 memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
1248}
1249
1250int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1251 const struct ieee80211_tx_queue_params *params)
1252{
1253 struct iwl_priv *priv = hw->priv;
1254 struct iwl_rxon_context *ctx;
1255 unsigned long flags;
1256 int q;
1257
1258 IWL_DEBUG_MAC80211(priv, "enter\n");
1259
1260 if (!iwl_legacy_is_ready_rf(priv)) {
1261 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1262 return -EIO;
1263 }
1264
1265 if (queue >= AC_NUM) {
1266 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1267 return 0;
1268 }
1269
1270 q = AC_NUM - 1 - queue;
1271
1272 spin_lock_irqsave(&priv->lock, flags);
1273
1274 for_each_context(priv, ctx) {
1275 ctx->qos_data.def_qos_parm.ac[q].cw_min =
1276 cpu_to_le16(params->cw_min);
1277 ctx->qos_data.def_qos_parm.ac[q].cw_max =
1278 cpu_to_le16(params->cw_max);
1279 ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
1280 ctx->qos_data.def_qos_parm.ac[q].edca_txop =
1281 cpu_to_le16((params->txop * 32));
1282
1283 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1284 }
1285
1286 spin_unlock_irqrestore(&priv->lock, flags);
1287
1288 IWL_DEBUG_MAC80211(priv, "leave\n");
1289 return 0;
1290}
1291EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
1292
1293int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
1294{
1295 struct iwl_priv *priv = hw->priv;
1296
1297 return priv->ibss_manager == IWL_IBSS_MANAGER;
1298}
1299EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
1300
1301static int
1302iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1303{
1304 iwl_legacy_connection_init_rx_config(priv, ctx);
1305
1306 if (priv->cfg->ops->hcmd->set_rxon_chain)
1307 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1308
1309 return iwl_legacy_commit_rxon(priv, ctx);
1310}
1311
1312static int iwl_legacy_setup_interface(struct iwl_priv *priv,
1313 struct iwl_rxon_context *ctx)
1314{
1315 struct ieee80211_vif *vif = ctx->vif;
1316 int err;
1317
1318 lockdep_assert_held(&priv->mutex);
1319
1320 /*
1321 * This variable will be correct only when there's just
1322 * a single context, but all code using it is for hardware
1323 * that supports only one context.
1324 */
1325 priv->iw_mode = vif->type;
1326
1327 ctx->is_active = true;
1328
1329 err = iwl_legacy_set_mode(priv, ctx);
1330 if (err) {
1331 if (!ctx->always_active)
1332 ctx->is_active = false;
1333 return err;
1334 }
1335
1336 return 0;
1337}
1338
1339int
1340iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1341{
1342 struct iwl_priv *priv = hw->priv;
1343 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1344 struct iwl_rxon_context *tmp, *ctx = NULL;
1345 int err;
1346
1347 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1348 vif->type, vif->addr);
1349
1350 mutex_lock(&priv->mutex);
1351
1352 if (!iwl_legacy_is_ready_rf(priv)) {
1353 IWL_WARN(priv, "Try to add interface when device not ready\n");
1354 err = -EINVAL;
1355 goto out;
1356 }
1357
1358 for_each_context(priv, tmp) {
1359 u32 possible_modes =
1360 tmp->interface_modes | tmp->exclusive_interface_modes;
1361
1362 if (tmp->vif) {
1363 /* check if this busy context is exclusive */
1364 if (tmp->exclusive_interface_modes &
1365 BIT(tmp->vif->type)) {
1366 err = -EINVAL;
1367 goto out;
1368 }
1369 continue;
1370 }
1371
1372 if (!(possible_modes & BIT(vif->type)))
1373 continue;
1374
1375 /* have maybe usable context w/o interface */
1376 ctx = tmp;
1377 break;
1378 }
1379
1380 if (!ctx) {
1381 err = -EOPNOTSUPP;
1382 goto out;
1383 }
1384
1385 vif_priv->ctx = ctx;
1386 ctx->vif = vif;
1387
1388 err = iwl_legacy_setup_interface(priv, ctx);
1389 if (!err)
1390 goto out;
1391
1392 ctx->vif = NULL;
1393 priv->iw_mode = NL80211_IFTYPE_STATION;
1394 out:
1395 mutex_unlock(&priv->mutex);
1396
1397 IWL_DEBUG_MAC80211(priv, "leave\n");
1398 return err;
1399}
1400EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
1401
1402static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
1403 struct ieee80211_vif *vif,
1404 bool mode_change)
1405{
1406 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1407
1408 lockdep_assert_held(&priv->mutex);
1409
1410 if (priv->scan_vif == vif) {
1411 iwl_legacy_scan_cancel_timeout(priv, 200);
1412 iwl_legacy_force_scan_end(priv);
1413 }
1414
1415 if (!mode_change) {
1416 iwl_legacy_set_mode(priv, ctx);
1417 if (!ctx->always_active)
1418 ctx->is_active = false;
1419 }
1420}
1421
1422void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
1423 struct ieee80211_vif *vif)
1424{
1425 struct iwl_priv *priv = hw->priv;
1426 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1427
1428 IWL_DEBUG_MAC80211(priv, "enter\n");
1429
1430 mutex_lock(&priv->mutex);
1431
1432 WARN_ON(ctx->vif != vif);
1433 ctx->vif = NULL;
1434
1435 iwl_legacy_teardown_interface(priv, vif, false);
1436
1437 memset(priv->bssid, 0, ETH_ALEN);
1438 mutex_unlock(&priv->mutex);
1439
1440 IWL_DEBUG_MAC80211(priv, "leave\n");
1441
1442}
1443EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
1444
1445int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
1446{
1447 if (!priv->txq)
1448 priv->txq = kzalloc(
1449 sizeof(struct iwl_tx_queue) *
1450 priv->cfg->base_params->num_of_queues,
1451 GFP_KERNEL);
1452 if (!priv->txq) {
1453 IWL_ERR(priv, "Not enough memory for txq\n");
1454 return -ENOMEM;
1455 }
1456 return 0;
1457}
1458EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
1459
1460void iwl_legacy_txq_mem(struct iwl_priv *priv)
1461{
1462 kfree(priv->txq);
1463 priv->txq = NULL;
1464}
1465EXPORT_SYMBOL(iwl_legacy_txq_mem);
1466
1467#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1468
1469#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1470
1471void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
1472{
1473 priv->tx_traffic_idx = 0;
1474 priv->rx_traffic_idx = 0;
1475 if (priv->tx_traffic)
1476 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1477 if (priv->rx_traffic)
1478 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1479}
1480
1481int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
1482{
1483 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1484
1485 if (iwl_debug_level & IWL_DL_TX) {
1486 if (!priv->tx_traffic) {
1487 priv->tx_traffic =
1488 kzalloc(traffic_size, GFP_KERNEL);
1489 if (!priv->tx_traffic)
1490 return -ENOMEM;
1491 }
1492 }
1493 if (iwl_debug_level & IWL_DL_RX) {
1494 if (!priv->rx_traffic) {
1495 priv->rx_traffic =
1496 kzalloc(traffic_size, GFP_KERNEL);
1497 if (!priv->rx_traffic)
1498 return -ENOMEM;
1499 }
1500 }
1501 iwl_legacy_reset_traffic_log(priv);
1502 return 0;
1503}
1504EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
1505
1506void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
1507{
1508 kfree(priv->tx_traffic);
1509 priv->tx_traffic = NULL;
1510
1511 kfree(priv->rx_traffic);
1512 priv->rx_traffic = NULL;
1513}
1514EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
1515
1516void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
1517 u16 length, struct ieee80211_hdr *header)
1518{
1519 __le16 fc;
1520 u16 len;
1521
1522 if (likely(!(iwl_debug_level & IWL_DL_TX)))
1523 return;
1524
1525 if (!priv->tx_traffic)
1526 return;
1527
1528 fc = header->frame_control;
1529 if (ieee80211_is_data(fc)) {
1530 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1531 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1532 memcpy((priv->tx_traffic +
1533 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1534 header, len);
1535 priv->tx_traffic_idx =
1536 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1537 }
1538}
1539EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
1540
1541void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
1542 u16 length, struct ieee80211_hdr *header)
1543{
1544 __le16 fc;
1545 u16 len;
1546
1547 if (likely(!(iwl_debug_level & IWL_DL_RX)))
1548 return;
1549
1550 if (!priv->rx_traffic)
1551 return;
1552
1553 fc = header->frame_control;
1554 if (ieee80211_is_data(fc)) {
1555 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1556 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1557 memcpy((priv->rx_traffic +
1558 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1559 header, len);
1560 priv->rx_traffic_idx =
1561 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1562 }
1563}
1564EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
1565
1566const char *iwl_legacy_get_mgmt_string(int cmd)
1567{
1568 switch (cmd) {
1569 IWL_CMD(MANAGEMENT_ASSOC_REQ);
1570 IWL_CMD(MANAGEMENT_ASSOC_RESP);
1571 IWL_CMD(MANAGEMENT_REASSOC_REQ);
1572 IWL_CMD(MANAGEMENT_REASSOC_RESP);
1573 IWL_CMD(MANAGEMENT_PROBE_REQ);
1574 IWL_CMD(MANAGEMENT_PROBE_RESP);
1575 IWL_CMD(MANAGEMENT_BEACON);
1576 IWL_CMD(MANAGEMENT_ATIM);
1577 IWL_CMD(MANAGEMENT_DISASSOC);
1578 IWL_CMD(MANAGEMENT_AUTH);
1579 IWL_CMD(MANAGEMENT_DEAUTH);
1580 IWL_CMD(MANAGEMENT_ACTION);
1581 default:
1582 return "UNKNOWN";
1583
1584 }
1585}
1586
1587const char *iwl_legacy_get_ctrl_string(int cmd)
1588{
1589 switch (cmd) {
1590 IWL_CMD(CONTROL_BACK_REQ);
1591 IWL_CMD(CONTROL_BACK);
1592 IWL_CMD(CONTROL_PSPOLL);
1593 IWL_CMD(CONTROL_RTS);
1594 IWL_CMD(CONTROL_CTS);
1595 IWL_CMD(CONTROL_ACK);
1596 IWL_CMD(CONTROL_CFEND);
1597 IWL_CMD(CONTROL_CFENDACK);
1598 default:
1599 return "UNKNOWN";
1600
1601 }
1602}
1603
1604void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
1605{
1606 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1607 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1608}
1609
1610/*
1611 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined,
1612 * iwl_legacy_update_stats() records all MGMT, CTRL and DATA
1613 * packets for both the TX and RX paths;
1614 * use debugfs to display the tx/rx statistics.
1615 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT or CTRL
1616 * information is recorded, but DATA packets are still counted,
1617 * because iwl_led.c needs to control the LED blinking based on
1618 * the number of TX and RX data frames.
1619 *
1620 */
1621void
1622iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1623{
1624 struct traffic_stats *stats;
1625
1626 if (is_tx)
1627 stats = &priv->tx_stats;
1628 else
1629 stats = &priv->rx_stats;
1630
1631 if (ieee80211_is_mgmt(fc)) {
1632 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1633 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
1634 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
1635 break;
1636 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
1637 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
1638 break;
1639 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
1640 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
1641 break;
1642 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
1643 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
1644 break;
1645 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
1646 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
1647 break;
1648 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
1649 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
1650 break;
1651 case cpu_to_le16(IEEE80211_STYPE_BEACON):
1652 stats->mgmt[MANAGEMENT_BEACON]++;
1653 break;
1654 case cpu_to_le16(IEEE80211_STYPE_ATIM):
1655 stats->mgmt[MANAGEMENT_ATIM]++;
1656 break;
1657 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
1658 stats->mgmt[MANAGEMENT_DISASSOC]++;
1659 break;
1660 case cpu_to_le16(IEEE80211_STYPE_AUTH):
1661 stats->mgmt[MANAGEMENT_AUTH]++;
1662 break;
1663 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
1664 stats->mgmt[MANAGEMENT_DEAUTH]++;
1665 break;
1666 case cpu_to_le16(IEEE80211_STYPE_ACTION):
1667 stats->mgmt[MANAGEMENT_ACTION]++;
1668 break;
1669 }
1670 } else if (ieee80211_is_ctl(fc)) {
1671 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1672 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
1673 stats->ctrl[CONTROL_BACK_REQ]++;
1674 break;
1675 case cpu_to_le16(IEEE80211_STYPE_BACK):
1676 stats->ctrl[CONTROL_BACK]++;
1677 break;
1678 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
1679 stats->ctrl[CONTROL_PSPOLL]++;
1680 break;
1681 case cpu_to_le16(IEEE80211_STYPE_RTS):
1682 stats->ctrl[CONTROL_RTS]++;
1683 break;
1684 case cpu_to_le16(IEEE80211_STYPE_CTS):
1685 stats->ctrl[CONTROL_CTS]++;
1686 break;
1687 case cpu_to_le16(IEEE80211_STYPE_ACK):
1688 stats->ctrl[CONTROL_ACK]++;
1689 break;
1690 case cpu_to_le16(IEEE80211_STYPE_CFEND):
1691 stats->ctrl[CONTROL_CFEND]++;
1692 break;
1693 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
1694 stats->ctrl[CONTROL_CFENDACK]++;
1695 break;
1696 }
1697 } else {
1698 /* data */
1699 stats->data_cnt++;
1700 stats->data_bytes += len;
1701 }
1702}
1703EXPORT_SYMBOL(iwl_legacy_update_stats);
1704#endif
1705
1706static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
1707{
1708 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1709 return;
1710
1711 if (!iwl_legacy_is_any_associated(priv)) {
1712 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
1713 return;
1714 }
1715	/*
1716	 * There is no easy, better way to force-reset the radio;
1717	 * the only known method is switching the channel, which forces
1718	 * the radio to reset and retune.
1719	 * Use an internal short scan (single channel) operation to
1720	 * achieve this.
1721	 * The driver should reset the radio when a number of consecutive
1722	 * missed beacons, or any other uCode error condition, is detected.
1723	 */
1724 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
1725 iwl_legacy_internal_short_hw_scan(priv);
1726}
1727
1728
1729int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
1730{
1731 struct iwl_force_reset *force_reset;
1732
1733 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1734 return -EINVAL;
1735
1736 if (mode >= IWL_MAX_FORCE_RESET) {
1737 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
1738 return -EINVAL;
1739 }
1740 force_reset = &priv->force_reset[mode];
1741 force_reset->reset_request_count++;
1742 if (!external) {
1743 if (force_reset->last_force_reset_jiffies &&
1744 time_after(force_reset->last_force_reset_jiffies +
1745 force_reset->reset_duration, jiffies)) {
1746 IWL_DEBUG_INFO(priv, "force reset rejected\n");
1747 force_reset->reset_reject_count++;
1748 return -EAGAIN;
1749 }
1750 }
1751 force_reset->reset_success_count++;
1752 force_reset->last_force_reset_jiffies = jiffies;
1753 IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
1754 switch (mode) {
1755 case IWL_RF_RESET:
1756 _iwl_legacy_force_rf_reset(priv);
1757 break;
1758 case IWL_FW_RESET:
1759		/*
1760		 * If the request is external (e.g. from debugfs),
1761		 * always perform it regardless of the module
1762		 * parameter setting.
1763		 * If the request is internal (uCode error or a driver-
1764		 * detected failure), the fw_restart module parameter
1765		 * must be checked before performing the firmware reload.
1766		 */
1767 if (!external && !priv->cfg->mod_params->restart_fw) {
1768 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
1769 "module parameter setting\n");
1770 break;
1771 }
1772 IWL_ERR(priv, "On demand firmware reload\n");
1773 /* Set the FW error flag -- cleared on iwl_down */
1774 set_bit(STATUS_FW_ERROR, &priv->status);
1775 wake_up_interruptible(&priv->wait_command_queue);
1776 /*
1777 * Keep the restart process from trying to send host
1778 * commands by clearing the INIT status bit
1779 */
1780 clear_bit(STATUS_READY, &priv->status);
1781 queue_work(priv->workqueue, &priv->restart);
1782 break;
1783 }
1784 return 0;
1785}
1786
1787int
1788iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
1789 struct ieee80211_vif *vif,
1790 enum nl80211_iftype newtype, bool newp2p)
1791{
1792 struct iwl_priv *priv = hw->priv;
1793 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1794 struct iwl_rxon_context *tmp;
1795 u32 interface_modes;
1796 int err;
1797
1798 newtype = ieee80211_iftype_p2p(newtype, newp2p);
1799
1800 mutex_lock(&priv->mutex);
1801
1802 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1803
1804 if (!(interface_modes & BIT(newtype))) {
1805 err = -EBUSY;
1806 goto out;
1807 }
1808
1809 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1810 for_each_context(priv, tmp) {
1811 if (ctx == tmp)
1812 continue;
1813
1814 if (!tmp->vif)
1815 continue;
1816
1817 /*
1818 * The current mode switch would be exclusive, but
1819 * another context is active ... refuse the switch.
1820 */
1821 err = -EBUSY;
1822 goto out;
1823 }
1824 }
1825
1826 /* success */
1827 iwl_legacy_teardown_interface(priv, vif, true);
1828 vif->type = newtype;
1829 err = iwl_legacy_setup_interface(priv, ctx);
1830 WARN_ON(err);
1831 /*
1832 * We've switched internally, but submitting to the
1833 * device may have failed for some reason. Mask this
1834 * error, because otherwise mac80211 will not switch
1835 * (and set the interface type back) and we'll be
1836 * out of sync with it.
1837 */
1838 err = 0;
1839
1840 out:
1841 mutex_unlock(&priv->mutex);
1842 return err;
1843}
1844EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
1845
1846/*
1847 * On every watchdog tick we check the (latest) time stamp. If it does not
1848 * change during the timeout period and the queue is not empty, we reset the firmware.
1849 */
1850static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
1851{
1852 struct iwl_tx_queue *txq = &priv->txq[cnt];
1853 struct iwl_queue *q = &txq->q;
1854 unsigned long timeout;
1855 int ret;
1856
1857 if (q->read_ptr == q->write_ptr) {
1858 txq->time_stamp = jiffies;
1859 return 0;
1860 }
1861
1862 timeout = txq->time_stamp +
1863 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1864
1865 if (time_after(jiffies, timeout)) {
1866 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1867 q->id, priv->cfg->base_params->wd_timeout);
1868 ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
1869 return (ret == -EAGAIN) ? 0 : 1;
1870 }
1871
1872 return 0;
1873}
1874
1875/*
1876 * Making the watchdog tick a quarter of the timeout ensures we discover a
1877 * hung queue between timeout and 1.25*timeout (no later than one tick past it).
1878 */
1879#define IWL_WD_TICK(timeout) ((timeout) / 4)
1880
1881/*
1882 * Watchdog timer callback: we check each TX queue for a stuck condition; if one
1883 * is hung, we reset the firmware. If everything is fine, just rearm the timer.
1884 */
1885void iwl_legacy_bg_watchdog(unsigned long data)
1886{
1887 struct iwl_priv *priv = (struct iwl_priv *)data;
1888 int cnt;
1889 unsigned long timeout;
1890
1891 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1892 return;
1893
1894 timeout = priv->cfg->base_params->wd_timeout;
1895 if (timeout == 0)
1896 return;
1897
1898 /* monitor and check for stuck cmd queue */
1899 if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
1900 return;
1901
1902 /* monitor and check for other stuck queues */
1903 if (iwl_legacy_is_any_associated(priv)) {
1904 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1905 /* skip as we already checked the command queue */
1906 if (cnt == priv->cmd_queue)
1907 continue;
1908 if (iwl_legacy_check_stuck_queue(priv, cnt))
1909 return;
1910 }
1911 }
1912
1913 mod_timer(&priv->watchdog, jiffies +
1914 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1915}
1916EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
1917
1918void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
1919{
1920 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1921
1922 if (timeout)
1923 mod_timer(&priv->watchdog,
1924 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1925 else
1926 del_timer(&priv->watchdog);
1927}
1928EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
1929
1930/*
1931 * Extended beacon time format:
1932 * the time in usec is converted into a 32-bit value in extended:internal format,
1933 * where the extended part is the beacon count and
1934 * the internal part is the time in usec within one beacon interval.
1935 */
1936u32
1937iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
1938 u32 usec, u32 beacon_interval)
1939{
1940 u32 quot;
1941 u32 rem;
1942 u32 interval = beacon_interval * TIME_UNIT;
1943
1944 if (!interval || !usec)
1945 return 0;
1946
1947 quot = (usec / interval) &
1948 (iwl_legacy_beacon_time_mask_high(priv,
1949 priv->hw_params.beacon_time_tsf_bits) >>
1950 priv->hw_params.beacon_time_tsf_bits);
1951 rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
1952 priv->hw_params.beacon_time_tsf_bits);
1953
1954 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
1955}
1956EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
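
To make the extended:internal encoding above concrete, here is a small standalone sketch (not part of the patch) that mirrors the quotient/remainder math of iwl_legacy_usecs_to_beacons() with plain integers. The 22-bit internal width and the 100-TU beacon interval are assumed example values, not values taken from any specific device.

	#include <stdio.h>
	#include <stdint.h>

	#define TIME_UNIT 1024	/* one TU in usec, as defined in iwl-core.h */

	/* Mirror of the driver's usec -> extended:internal conversion. */
	static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval_tu,
					 unsigned int tsf_bits)
	{
		uint32_t interval = beacon_interval_tu * TIME_UNIT;
		uint32_t low_mask = (1U << tsf_bits) - 1;	/* internal part */
		uint32_t high_mask = ~low_mask >> tsf_bits;	/* extended part */
		uint32_t quot, rem;

		if (!interval || !usec)
			return 0;

		quot = (usec / interval) & high_mask;	/* whole beacon intervals */
		rem = (usec % interval) & low_mask;	/* usec within one interval */

		return (quot << tsf_bits) + rem;
	}

	int main(void)
	{
		/* 250 ms with a 100 TU (102.4 ms) interval and 22 internal bits */
		uint32_t v = usecs_to_beacons(250000, 100, 22);

		printf("extended (beacon count): %u\n", v >> 22);	/* 2 */
		printf("internal (usec): %u\n", v & ((1U << 22) - 1));	/* 45200 */
		return 0;
	}
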
1957
1958/* base is usually what we get from ucode with each received frame,
1959 * the same as HW timer counter counting down
1960 */
1961__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
1962 u32 addon, u32 beacon_interval)
1963{
1964 u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
1965 priv->hw_params.beacon_time_tsf_bits);
1966 u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
1967 priv->hw_params.beacon_time_tsf_bits);
1968 u32 interval = beacon_interval * TIME_UNIT;
1969 u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
1970 priv->hw_params.beacon_time_tsf_bits)) +
1971 (addon & iwl_legacy_beacon_time_mask_high(priv,
1972 priv->hw_params.beacon_time_tsf_bits));
1973
1974 if (base_low > addon_low)
1975 res += base_low - addon_low;
1976 else if (base_low < addon_low) {
1977 res += interval + base_low - addon_low;
1978 res += (1 << priv->hw_params.beacon_time_tsf_bits);
1979 } else
1980 res += (1 << priv->hw_params.beacon_time_tsf_bits);
1981
1982 return cpu_to_le32(res);
1983}
1984EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
1985
1986#ifdef CONFIG_PM
1987
1988int iwl_legacy_pci_suspend(struct device *device)
1989{
1990 struct pci_dev *pdev = to_pci_dev(device);
1991 struct iwl_priv *priv = pci_get_drvdata(pdev);
1992
1993	/*
1994	 * This function is called when the system goes into suspend.
1995	 * mac80211 will first call iwl_mac_stop() from its suspend handler,
1996	 * but since iwl_mac_stop() has no knowledge of who the caller is,
1997	 * it will not call apm_ops.stop() to stop the DMA operation.
1998	 * Call apm_ops.stop() here to make sure the DMA is stopped.
1999	 */
2000 iwl_legacy_apm_stop(priv);
2001
2002 return 0;
2003}
2004EXPORT_SYMBOL(iwl_legacy_pci_suspend);
2005
2006int iwl_legacy_pci_resume(struct device *device)
2007{
2008 struct pci_dev *pdev = to_pci_dev(device);
2009 struct iwl_priv *priv = pci_get_drvdata(pdev);
2010 bool hw_rfkill = false;
2011
2012 /*
2013 * We disable the RETRY_TIMEOUT register (0x41) to keep
2014 * PCI Tx retries from interfering with C3 CPU state.
2015 */
2016 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2017
2018 iwl_legacy_enable_interrupts(priv);
2019
2020 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2021 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2022 hw_rfkill = true;
2023
2024 if (hw_rfkill)
2025 set_bit(STATUS_RF_KILL_HW, &priv->status);
2026 else
2027 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2028
2029 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2030
2031 return 0;
2032}
2033EXPORT_SYMBOL(iwl_legacy_pci_resume);
2034
2035const struct dev_pm_ops iwl_legacy_pm_ops = {
2036 .suspend = iwl_legacy_pci_suspend,
2037 .resume = iwl_legacy_pci_resume,
2038 .freeze = iwl_legacy_pci_suspend,
2039 .thaw = iwl_legacy_pci_resume,
2040 .poweroff = iwl_legacy_pci_suspend,
2041 .restore = iwl_legacy_pci_resume,
2042};
2043EXPORT_SYMBOL(iwl_legacy_pm_ops);
2044
2045#endif /* CONFIG_PM */
2046
2047static void
2048iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
2049{
2050 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2051 return;
2052
2053 if (!ctx->is_active)
2054 return;
2055
2056 ctx->qos_data.def_qos_parm.qos_flags = 0;
2057
2058 if (ctx->qos_data.qos_active)
2059 ctx->qos_data.def_qos_parm.qos_flags |=
2060 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2061
2062 if (ctx->ht.enabled)
2063 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
2064
2065 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2066 ctx->qos_data.qos_active,
2067 ctx->qos_data.def_qos_parm.qos_flags);
2068
2069 iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
2070 sizeof(struct iwl_qosparam_cmd),
2071 &ctx->qos_data.def_qos_parm, NULL);
2072}
2073
2074/**
2075 * iwl_legacy_mac_config - mac80211 config callback
2076 */
2077int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
2078{
2079 struct iwl_priv *priv = hw->priv;
2080 const struct iwl_channel_info *ch_info;
2081 struct ieee80211_conf *conf = &hw->conf;
2082 struct ieee80211_channel *channel = conf->channel;
2083 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2084 struct iwl_rxon_context *ctx;
2085 unsigned long flags = 0;
2086 int ret = 0;
2087 u16 ch;
2088 int scan_active = 0;
2089 bool ht_changed[NUM_IWL_RXON_CTX] = {};
2090
2091 if (WARN_ON(!priv->cfg->ops->legacy))
2092 return -EOPNOTSUPP;
2093
2094 mutex_lock(&priv->mutex);
2095
2096 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2097 channel->hw_value, changed);
2098
2099 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2100 test_bit(STATUS_SCANNING, &priv->status))) {
2101 scan_active = 1;
2102 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2103 }
2104
2105 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2106 IEEE80211_CONF_CHANGE_CHANNEL)) {
2107 /* mac80211 uses static for non-HT which is what we want */
2108 priv->current_ht_config.smps = conf->smps_mode;
2109
2110 /*
2111 * Recalculate chain counts.
2112 *
2113 * If monitor mode is enabled then mac80211 will
2114 * set up the SM PS mode to OFF if an HT channel is
2115 * configured.
2116 */
2117 if (priv->cfg->ops->hcmd->set_rxon_chain)
2118 for_each_context(priv, ctx)
2119 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2120 }
2121
2122	/* during scanning, mac80211 will delay the channel setting until
2123	 * the scan finishes, with changed = 0
2124	 */
2125 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2126 if (scan_active)
2127 goto set_ch_out;
2128
2129 ch = channel->hw_value;
2130 ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
2131 if (!iwl_legacy_is_channel_valid(ch_info)) {
2132 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2133 ret = -EINVAL;
2134 goto set_ch_out;
2135 }
2136
2137 spin_lock_irqsave(&priv->lock, flags);
2138
2139 for_each_context(priv, ctx) {
2140 /* Configure HT40 channels */
2141 if (ctx->ht.enabled != conf_is_ht(conf)) {
2142 ctx->ht.enabled = conf_is_ht(conf);
2143 ht_changed[ctx->ctxid] = true;
2144 }
2145 if (ctx->ht.enabled) {
2146 if (conf_is_ht40_minus(conf)) {
2147 ctx->ht.extension_chan_offset =
2148 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2149 ctx->ht.is_40mhz = true;
2150 } else if (conf_is_ht40_plus(conf)) {
2151 ctx->ht.extension_chan_offset =
2152 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2153 ctx->ht.is_40mhz = true;
2154 } else {
2155 ctx->ht.extension_chan_offset =
2156 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2157 ctx->ht.is_40mhz = false;
2158 }
2159 } else
2160 ctx->ht.is_40mhz = false;
2161
2162 /*
2163 * Default to no protection. Protection mode will
2164 * later be set from BSS config in iwl_ht_conf
2165 */
2166 ctx->ht.protection =
2167 IEEE80211_HT_OP_MODE_PROTECTION_NONE;
2168
2169 /* if we are switching from ht to 2.4 clear flags
2170 * from any ht related info since 2.4 does not
2171 * support ht */
2172 if ((le16_to_cpu(ctx->staging.channel) != ch))
2173 ctx->staging.flags = 0;
2174
2175 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2176 iwl_legacy_set_rxon_ht(priv, ht_conf);
2177
2178 iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
2179 ctx->vif);
2180 }
2181
2182 spin_unlock_irqrestore(&priv->lock, flags);
2183
2184 if (priv->cfg->ops->legacy->update_bcast_stations)
2185 ret =
2186 priv->cfg->ops->legacy->update_bcast_stations(priv);
2187
2188 set_ch_out:
2189 /* The list of supported rates and rate mask can be different
2190 * for each band; since the band may have changed, reset
2191 * the rate mask to what mac80211 lists */
2192 iwl_legacy_set_rate(priv);
2193 }
2194
2195 if (changed & (IEEE80211_CONF_CHANGE_PS |
2196 IEEE80211_CONF_CHANGE_IDLE)) {
2197 ret = iwl_legacy_power_update_mode(priv, false);
2198 if (ret)
2199 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2200 }
2201
2202 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2203 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2204 priv->tx_power_user_lmt, conf->power_level);
2205
2206 iwl_legacy_set_tx_power(priv, conf->power_level, false);
2207 }
2208
2209 if (!iwl_legacy_is_ready(priv)) {
2210 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2211 goto out;
2212 }
2213
2214 if (scan_active)
2215 goto out;
2216
2217 for_each_context(priv, ctx) {
2218 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
2219 iwl_legacy_commit_rxon(priv, ctx);
2220 else
2221 IWL_DEBUG_INFO(priv,
2222 "Not re-sending same RXON configuration.\n");
2223 if (ht_changed[ctx->ctxid])
2224 iwl_legacy_update_qos(priv, ctx);
2225 }
2226
2227out:
2228 IWL_DEBUG_MAC80211(priv, "leave\n");
2229 mutex_unlock(&priv->mutex);
2230 return ret;
2231}
2232EXPORT_SYMBOL(iwl_legacy_mac_config);
2233
2234void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
2235{
2236 struct iwl_priv *priv = hw->priv;
2237 unsigned long flags;
2238 /* IBSS can only be the IWL_RXON_CTX_BSS context */
2239 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2240
2241 if (WARN_ON(!priv->cfg->ops->legacy))
2242 return;
2243
2244 mutex_lock(&priv->mutex);
2245 IWL_DEBUG_MAC80211(priv, "enter\n");
2246
2247 spin_lock_irqsave(&priv->lock, flags);
2248 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2249 spin_unlock_irqrestore(&priv->lock, flags);
2250
2251 spin_lock_irqsave(&priv->lock, flags);
2252
2253 /* new association get rid of ibss beacon skb */
2254 if (priv->beacon_skb)
2255 dev_kfree_skb(priv->beacon_skb);
2256
2257 priv->beacon_skb = NULL;
2258
2259 priv->timestamp = 0;
2260
2261 spin_unlock_irqrestore(&priv->lock, flags);
2262
2263 iwl_legacy_scan_cancel_timeout(priv, 100);
2264 if (!iwl_legacy_is_ready_rf(priv)) {
2265 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2266 mutex_unlock(&priv->mutex);
2267 return;
2268 }
2269
2270	/* we are restarting the association process;
2271	 * clear the RXON_FILTER_ASSOC_MSK bit
2272	 */
2273 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2274 iwl_legacy_commit_rxon(priv, ctx);
2275
2276 iwl_legacy_set_rate(priv);
2277
2278 mutex_unlock(&priv->mutex);
2279
2280 IWL_DEBUG_MAC80211(priv, "leave\n");
2281}
2282EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
2283
2284static void iwl_legacy_ht_conf(struct iwl_priv *priv,
2285 struct ieee80211_vif *vif)
2286{
2287 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2288 struct ieee80211_sta *sta;
2289 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
2290 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2291
2292 IWL_DEBUG_ASSOC(priv, "enter:\n");
2293
2294 if (!ctx->ht.enabled)
2295 return;
2296
2297 ctx->ht.protection =
2298 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2299 ctx->ht.non_gf_sta_present =
2300 !!(bss_conf->ht_operation_mode &
2301 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2302
2303 ht_conf->single_chain_sufficient = false;
2304
2305 switch (vif->type) {
2306 case NL80211_IFTYPE_STATION:
2307 rcu_read_lock();
2308 sta = ieee80211_find_sta(vif, bss_conf->bssid);
2309 if (sta) {
2310 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2311 int maxstreams;
2312
2313 maxstreams = (ht_cap->mcs.tx_params &
2314 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2315 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2316 maxstreams += 1;
2317
2318 if ((ht_cap->mcs.rx_mask[1] == 0) &&
2319 (ht_cap->mcs.rx_mask[2] == 0))
2320 ht_conf->single_chain_sufficient = true;
2321 if (maxstreams <= 1)
2322 ht_conf->single_chain_sufficient = true;
2323 } else {
2324			/*
2325			 * If at all, this can only happen through a race
2326			 * when the AP disconnects us while we're still
2327			 * setting up the connection; in that case mac80211
2328			 * will soon tell us about it.
2329			 */
2330 ht_conf->single_chain_sufficient = true;
2331 }
2332 rcu_read_unlock();
2333 break;
2334 case NL80211_IFTYPE_ADHOC:
2335 ht_conf->single_chain_sufficient = true;
2336 break;
2337 default:
2338 break;
2339 }
2340
2341 IWL_DEBUG_ASSOC(priv, "leave\n");
2342}
2343
2344static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
2345 struct ieee80211_vif *vif)
2346{
2347 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2348
2349 /*
2350 * inform the ucode that there is no longer an
2351 * association and that no more packets should be
2352 * sent
2353 */
2354 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2355 ctx->staging.assoc_id = 0;
2356 iwl_legacy_commit_rxon(priv, ctx);
2357}
2358
2359static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
2360 struct ieee80211_vif *vif)
2361{
2362 struct iwl_priv *priv = hw->priv;
2363 unsigned long flags;
2364 __le64 timestamp;
2365 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
2366
2367 if (!skb)
2368 return;
2369
2370 IWL_DEBUG_MAC80211(priv, "enter\n");
2371
2372 lockdep_assert_held(&priv->mutex);
2373
2374 if (!priv->beacon_ctx) {
2375 IWL_ERR(priv, "update beacon but no beacon context!\n");
2376 dev_kfree_skb(skb);
2377 return;
2378 }
2379
2380 spin_lock_irqsave(&priv->lock, flags);
2381
2382 if (priv->beacon_skb)
2383 dev_kfree_skb(priv->beacon_skb);
2384
2385 priv->beacon_skb = skb;
2386
2387 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2388 priv->timestamp = le64_to_cpu(timestamp);
2389
2390 IWL_DEBUG_MAC80211(priv, "leave\n");
2391 spin_unlock_irqrestore(&priv->lock, flags);
2392
2393 if (!iwl_legacy_is_ready_rf(priv)) {
2394 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2395 return;
2396 }
2397
2398 priv->cfg->ops->legacy->post_associate(priv);
2399}
2400
2401void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
2402 struct ieee80211_vif *vif,
2403 struct ieee80211_bss_conf *bss_conf,
2404 u32 changes)
2405{
2406 struct iwl_priv *priv = hw->priv;
2407 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2408 int ret;
2409
2410 if (WARN_ON(!priv->cfg->ops->legacy))
2411 return;
2412
2413 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
2414
2415 if (!iwl_legacy_is_alive(priv))
2416 return;
2417
2418 mutex_lock(&priv->mutex);
2419
2420 if (changes & BSS_CHANGED_QOS) {
2421 unsigned long flags;
2422
2423 spin_lock_irqsave(&priv->lock, flags);
2424 ctx->qos_data.qos_active = bss_conf->qos;
2425 iwl_legacy_update_qos(priv, ctx);
2426 spin_unlock_irqrestore(&priv->lock, flags);
2427 }
2428
2429 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2430 /*
2431 * the add_interface code must make sure we only ever
2432 * have a single interface that could be beaconing at
2433 * any time.
2434 */
2435 if (vif->bss_conf.enable_beacon)
2436 priv->beacon_ctx = ctx;
2437 else
2438 priv->beacon_ctx = NULL;
2439 }
2440
2441 if (changes & BSS_CHANGED_BSSID) {
2442 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
2443
2444 /*
2445 * If there is currently a HW scan going on in the
2446 * background then we need to cancel it else the RXON
2447 * below/in post_associate will fail.
2448 */
2449 if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
2450 IWL_WARN(priv,
2451 "Aborted scan still in progress after 100ms\n");
2452 IWL_DEBUG_MAC80211(priv,
2453 "leaving - scan abort failed.\n");
2454 mutex_unlock(&priv->mutex);
2455 return;
2456 }
2457
2458 /* mac80211 only sets assoc when in STATION mode */
2459 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
2460 memcpy(ctx->staging.bssid_addr,
2461 bss_conf->bssid, ETH_ALEN);
2462
2463 /* currently needed in a few places */
2464 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2465 } else {
2466 ctx->staging.filter_flags &=
2467 ~RXON_FILTER_ASSOC_MSK;
2468 }
2469
2470 }
2471
2472 /*
2473 * This needs to be after setting the BSSID in case
2474 * mac80211 decides to do both changes at once because
2475 * it will invoke post_associate.
2476 */
2477 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
2478 iwl_legacy_beacon_update(hw, vif);
2479
2480 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
2481 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
2482 bss_conf->use_short_preamble);
2483 if (bss_conf->use_short_preamble)
2484 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2485 else
2486 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2487 }
2488
2489 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
2490 IWL_DEBUG_MAC80211(priv,
2491 "ERP_CTS %d\n", bss_conf->use_cts_prot);
2492 if (bss_conf->use_cts_prot &&
2493 (priv->band != IEEE80211_BAND_5GHZ))
2494 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
2495 else
2496 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
2497 if (bss_conf->use_cts_prot)
2498 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
2499 else
2500 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
2501 }
2502
2503 if (changes & BSS_CHANGED_BASIC_RATES) {
2504 /* XXX use this information
2505 *
2506 * To do that, remove code from iwl_legacy_set_rate() and put something
2507 * like this here:
2508 *
2509 if (A-band)
2510 ctx->staging.ofdm_basic_rates =
2511 bss_conf->basic_rates;
2512 else
2513 ctx->staging.ofdm_basic_rates =
2514 bss_conf->basic_rates >> 4;
2515 ctx->staging.cck_basic_rates =
2516 bss_conf->basic_rates & 0xF;
2517 */
2518 }
2519
2520 if (changes & BSS_CHANGED_HT) {
2521 iwl_legacy_ht_conf(priv, vif);
2522
2523 if (priv->cfg->ops->hcmd->set_rxon_chain)
2524 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2525 }
2526
2527 if (changes & BSS_CHANGED_ASSOC) {
2528 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
2529 if (bss_conf->assoc) {
2530 priv->timestamp = bss_conf->timestamp;
2531
2532 if (!iwl_legacy_is_rfkill(priv))
2533 priv->cfg->ops->legacy->post_associate(priv);
2534 } else
2535 iwl_legacy_set_no_assoc(priv, vif);
2536 }
2537
2538 if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
2539 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
2540 changes);
2541 ret = iwl_legacy_send_rxon_assoc(priv, ctx);
2542 if (!ret) {
2543 /* Sync active_rxon with latest change. */
2544 memcpy((void *)&ctx->active,
2545 &ctx->staging,
2546 sizeof(struct iwl_legacy_rxon_cmd));
2547 }
2548 }
2549
2550 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2551 if (vif->bss_conf.enable_beacon) {
2552 memcpy(ctx->staging.bssid_addr,
2553 bss_conf->bssid, ETH_ALEN);
2554 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2555 priv->cfg->ops->legacy->config_ap(priv);
2556 } else
2557 iwl_legacy_set_no_assoc(priv, vif);
2558 }
2559
2560 if (changes & BSS_CHANGED_IBSS) {
2561 ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
2562 bss_conf->ibss_joined);
2563 if (ret)
2564 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
2565 bss_conf->ibss_joined ? "add" : "remove",
2566 bss_conf->bssid);
2567 }
2568
2569 mutex_unlock(&priv->mutex);
2570
2571 IWL_DEBUG_MAC80211(priv, "leave\n");
2572}
2573EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
2574
2575irqreturn_t iwl_legacy_isr(int irq, void *data)
2576{
2577 struct iwl_priv *priv = data;
2578 u32 inta, inta_mask;
2579 u32 inta_fh;
2580 unsigned long flags;
2581 if (!priv)
2582 return IRQ_NONE;
2583
2584 spin_lock_irqsave(&priv->lock, flags);
2585
2586 /* Disable (but don't clear!) interrupts here to avoid
2587 * back-to-back ISRs and sporadic interrupts from our NIC.
2588 * If we have something to service, the tasklet will re-enable ints.
2589 * If we *don't* have something, we'll re-enable before leaving here. */
2590 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
2591 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
2592
2593 /* Discover which interrupts are active/pending */
2594 inta = iwl_read32(priv, CSR_INT);
2595 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
2596
2597 /* Ignore interrupt if there's nothing in NIC to service.
2598 * This may be due to IRQ shared with another device,
2599 * or due to sporadic interrupts thrown from our NIC. */
2600 if (!inta && !inta_fh) {
2601 IWL_DEBUG_ISR(priv,
2602 "Ignore interrupt, inta == 0, inta_fh == 0\n");
2603 goto none;
2604 }
2605
2606 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
2607 /* Hardware disappeared. It might have already raised
2608 * an interrupt */
2609 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
2610 goto unplugged;
2611 }
2612
2613 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
2614 inta, inta_mask, inta_fh);
2615
2616 inta &= ~CSR_INT_BIT_SCD;
2617
2618 /* iwl_irq_tasklet() will service interrupts and re-enable them */
2619 if (likely(inta || inta_fh))
2620 tasklet_schedule(&priv->irq_tasklet);
2621
2622unplugged:
2623 spin_unlock_irqrestore(&priv->lock, flags);
2624 return IRQ_HANDLED;
2625
2626none:
2627 /* re-enable interrupts here since we don't have anything to service. */
2628	/* only re-enable if disabled by irq */
2629 if (test_bit(STATUS_INT_ENABLED, &priv->status))
2630 iwl_legacy_enable_interrupts(priv);
2631 spin_unlock_irqrestore(&priv->lock, flags);
2632 return IRQ_NONE;
2633}
2634EXPORT_SYMBOL(iwl_legacy_isr);
2635
2636/*
2637 * iwl_legacy_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
2638 * function.
2639 */
2640void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
2641 struct ieee80211_tx_info *info,
2642 __le16 fc, __le32 *tx_flags)
2643{
2644 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2645 *tx_flags |= TX_CMD_FLG_RTS_MSK;
2646 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2647 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2648
2649 if (!ieee80211_is_mgmt(fc))
2650 return;
2651
2652 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2653 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2654 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2655 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2656 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2657 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2658 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2659 break;
2660 }
2661 } else if (info->control.rates[0].flags &
2662 IEEE80211_TX_RC_USE_CTS_PROTECT) {
2663 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2664 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2665 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2666 }
2667}
2668EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
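
As a usage illustration (hypothetical, not part of the patch), a device TX routine would typically pass the frame-control field and the rate-control info through this helper while building the TX command; the real callers are the 3945 and 4965 TX paths. The example_build_tx_flags() name and the surrounding setup are invented for illustration, and the sketch assumes the usual kernel/mac80211 context.

	static void example_build_tx_flags(struct iwl_priv *priv,
					   struct sk_buff *skb,
					   struct iwl_tx_cmd *tx_cmd)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		__le32 tx_flags = tx_cmd->tx_flags;

		/* Let the shared helper choose RTS or CTS-to-self protection
		 * based on the rate-control flags and the frame type. */
		iwl_legacy_tx_cmd_protection(priv, info, hdr->frame_control,
					     &tx_flags);

		tx_cmd->tx_flags = tx_flags;
	}
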
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
new file mode 100644
index 000000000000..1159b0d255b8
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -0,0 +1,646 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_core_h__
64#define __iwl_legacy_core_h__
65
66/************************
67 * forward declarations *
68 ************************/
69struct iwl_host_cmd;
70struct iwl_cmd;
71
72
73#define IWLWIFI_VERSION "in-tree:"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>"
76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg)
81
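
For illustration only (not part of this header): a PCI ID table entry built with IWL_PCI_DEVICE(). The example_hw_card_ids name is invented, the 0x4229 device ID and iwl4965_cfg are used as sample arguments, and the sketch assumes the usual <linux/pci.h> context; the actual ID tables live in the 3945 and 4965 base drivers.

	static struct pci_device_id example_hw_card_ids[] = {
		{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},	/* example entry */
		{0}
	};
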
82#define TIME_UNIT 1024
83
84#define IWL_SKU_G 0x1
85#define IWL_SKU_A 0x2
86#define IWL_SKU_N 0x8
87
88#define IWL_CMD(x) case x: return #x
89
90struct iwl_hcmd_ops {
91 int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
92 int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
93 void (*set_rxon_chain)(struct iwl_priv *priv,
94 struct iwl_rxon_context *ctx);
95};
96
97struct iwl_hcmd_utils_ops {
98 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
99 u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
100 u8 *data);
101 int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
102 void (*post_scan)(struct iwl_priv *priv);
103};
104
105struct iwl_apm_ops {
106 int (*init)(struct iwl_priv *priv);
107 void (*config)(struct iwl_priv *priv);
108};
109
110struct iwl_debugfs_ops {
111 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
112 size_t count, loff_t *ppos);
113 ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
114 size_t count, loff_t *ppos);
115 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
116 size_t count, loff_t *ppos);
117};
118
119struct iwl_temp_ops {
120 void (*temperature)(struct iwl_priv *priv);
121};
122
123struct iwl_lib_ops {
124 /* set hw dependent parameters */
125 int (*set_hw_params)(struct iwl_priv *priv);
126 /* Handling TX */
127 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
128 struct iwl_tx_queue *txq,
129 u16 byte_cnt);
130 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
131 struct iwl_tx_queue *txq,
132 dma_addr_t addr,
133 u16 len, u8 reset, u8 pad);
134 void (*txq_free_tfd)(struct iwl_priv *priv,
135 struct iwl_tx_queue *txq);
136 int (*txq_init)(struct iwl_priv *priv,
137 struct iwl_tx_queue *txq);
138 /* setup Rx handler */
139 void (*rx_handler_setup)(struct iwl_priv *priv);
140 /* alive notification after init uCode load */
141 void (*init_alive_start)(struct iwl_priv *priv);
142 /* check validity of rtc data address */
143 int (*is_valid_rtc_data_addr)(u32 addr);
144 /* 1st ucode load */
145 int (*load_ucode)(struct iwl_priv *priv);
146 int (*dump_nic_event_log)(struct iwl_priv *priv,
147 bool full_log, char **buf, bool display);
148 void (*dump_nic_error_log)(struct iwl_priv *priv);
149 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
150 int (*set_channel_switch)(struct iwl_priv *priv,
151 struct ieee80211_channel_switch *ch_switch);
152 /* power management */
153 struct iwl_apm_ops apm_ops;
154
155 /* power */
156 int (*send_tx_power) (struct iwl_priv *priv);
157 void (*update_chain_flags)(struct iwl_priv *priv);
158
159 /* eeprom operations (as defined in iwl-eeprom.h) */
160 struct iwl_eeprom_ops eeprom_ops;
161
162 /* temperature */
163 struct iwl_temp_ops temp_ops;
164 /* check for plcp health */
165 bool (*check_plcp_health)(struct iwl_priv *priv,
166 struct iwl_rx_packet *pkt);
167
168 struct iwl_debugfs_ops debugfs_ops;
169
170};
171
172struct iwl_led_ops {
173 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
174};
175
176struct iwl_legacy_ops {
177 void (*post_associate)(struct iwl_priv *priv);
178 void (*config_ap)(struct iwl_priv *priv);
179 /* station management */
180 int (*update_bcast_stations)(struct iwl_priv *priv);
181 int (*manage_ibss_station)(struct iwl_priv *priv,
182 struct ieee80211_vif *vif, bool add);
183};
184
185struct iwl_ops {
186 const struct iwl_lib_ops *lib;
187 const struct iwl_hcmd_ops *hcmd;
188 const struct iwl_hcmd_utils_ops *utils;
189 const struct iwl_led_ops *led;
190 const struct iwl_nic_ops *nic;
191 const struct iwl_legacy_ops *legacy;
192 const struct ieee80211_ops *ieee80211_ops;
193};
194
195struct iwl_mod_params {
196 int sw_crypto; /* def: 0 = using hardware encryption */
197 int disable_hw_scan; /* def: 0 = use h/w scan */
198 int num_of_queues; /* def: HW dependent */
199 int disable_11n; /* def: 0 = 11n capabilities enabled */
200 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
201 int antenna; /* def: 0 = both antennas (use diversity) */
202 int restart_fw; /* def: 1 = restart firmware */
203};
204
205/*
206 * @led_compensation: compensate the LED on/off time per HW according
207 * to the deviation, to achieve the desired LED frequency.
208 * The detailed algorithm is described in iwl-led.c.
209 * @chain_noise_num_beacons: number of beacons used to compute chain noise
210 * @plcp_delta_threshold: PLCP error rate threshold used to trigger
211 * radio retuning when a high receive PLCP error rate is observed
212 * @wd_timeout: TX queues watchdog timeout
213 * @temperature_kelvin: temperature reported by uCode is in kelvin
214 * @max_event_log_size: size of the event log buffer for uCode event logging
215 * @ucode_tracing: support ucode continuous tracing
216 * @sensitivity_calib_by_driver: driver has the capability to perform
217 * sensitivity calibration operation
218 * @chain_noise_calib_by_driver: driver has the capability to perform
219 * chain noise calibration operation
220 */
221struct iwl_base_params {
222 int eeprom_size;
223 int num_of_queues; /* def: HW dependent */
224 int num_of_ampdu_queues;/* def: HW dependent */
225 /* for iwl_legacy_apm_init() */
226 u32 pll_cfg_val;
227 bool set_l0s;
228 bool use_bsm;
229
230 u16 led_compensation;
231 int chain_noise_num_beacons;
232 u8 plcp_delta_threshold;
233 unsigned int wd_timeout;
234 bool temperature_kelvin;
235 u32 max_event_log_size;
236 const bool ucode_tracing;
237 const bool sensitivity_calib_by_driver;
238 const bool chain_noise_calib_by_driver;
239};
240
241/**
242 * struct iwl_cfg
243 * @fw_name_pre: Firmware filename prefix. The api version and extension
244 * (.ucode) will be added to filename before loading from disk. The
245 * filename is constructed as fw_name_pre<api>.ucode.
246 * @ucode_api_max: Highest version of uCode API supported by driver.
247 * @ucode_api_min: Lowest version of uCode API supported by driver.
248 * @scan_antennas: available antenna for scan operation
249 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
250 *
251 * We enable the driver to be backward compatible wrt API version. The
252 * driver specifies which APIs it supports (with @ucode_api_max being the
253 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
254 * it has a supported API version. The firmware's API version will be
255 * stored in @iwl_priv, enabling the driver to make runtime changes based
256 * on firmware version used.
257 *
258 * For example,
259 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
260 * Driver interacts with Firmware API version >= 2.
261 * } else {
262 * Driver interacts with Firmware API version 1.
263 * }
264 *
265 * The ideal usage of this infrastructure is to treat a new ucode API
266 * release as a new hardware revision. That is, through utilizing the
267 * iwl_hcmd_utils_ops etc. we accommodate different command structures
268 * and flows between hardware versions as well as their API
269 * versions.
270 *
271 */
272struct iwl_cfg {
273 /* params specific to an individual device within a device family */
274 const char *name;
275 const char *fw_name_pre;
276 const unsigned int ucode_api_max;
277 const unsigned int ucode_api_min;
278 u8 valid_tx_ant;
279 u8 valid_rx_ant;
280 unsigned int sku;
281 u16 eeprom_ver;
282 u16 eeprom_calib_ver;
283 const struct iwl_ops *ops;
284 /* module based parameters which can be set from modprobe cmd */
285 const struct iwl_mod_params *mod_params;
286 /* params not likely to change within a device family */
287 struct iwl_base_params *base_params;
288 /* params likely to change within a device family */
289 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
290 u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
291 enum iwl_led_mode led_mode;
292};
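As a worked example of the @fw_name_pre/@ucode_api_max/@ucode_api_min contract described above, the sketch below builds the firmware filename and rejects an image whose API version falls outside the supported window. The helper itself is hypothetical; only the struct iwl_cfg fields and priv->ucode_ver come from the driver.

/* Sketch only: iwl_example_check_ucode_api() is not a real driver function. */
static int iwl_example_check_ucode_api(struct iwl_priv *priv,
				       const struct iwl_cfg *cfg,
				       u32 api_ver)
{
	char fw_name[64];

	/* Filename is fw_name_pre<api>.ucode, newest supported API first. */
	snprintf(fw_name, sizeof(fw_name), "%s%u%s",
		 cfg->fw_name_pre, cfg->ucode_api_max, ".ucode");
	IWL_DEBUG_INFO(priv, "attempting to load %s\n", fw_name);

	/* api_ver would come from the loaded image's header. */
	if (api_ver < cfg->ucode_api_min || api_ver > cfg->ucode_api_max)
		return -EINVAL;	/* image not within the supported window */

	priv->ucode_ver = api_ver;	/* later tested via IWL_UCODE_API() */
	return 0;
}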
293
294/***************************
295 * L i b *
296 ***************************/
297
298struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
299int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
300 const struct ieee80211_tx_queue_params *params);
301int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
302void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
303 struct iwl_rxon_context *ctx,
304 int hw_decrypt);
305int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
306 struct iwl_rxon_context *ctx);
307int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
308 struct iwl_rxon_context *ctx);
309int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
310 struct ieee80211_channel *ch,
311 struct iwl_rxon_context *ctx);
312void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
313 struct iwl_rxon_context *ctx,
314 enum ieee80211_band band,
315 struct ieee80211_vif *vif);
316u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
317 enum ieee80211_band band);
318void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
319 struct iwl_ht_config *ht_conf);
320bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
321 struct iwl_rxon_context *ctx,
322 struct ieee80211_sta_ht_cap *ht_cap);
323void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
324 struct iwl_rxon_context *ctx);
325void iwl_legacy_set_rate(struct iwl_priv *priv);
326int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
327 struct ieee80211_hdr *hdr,
328 u32 decrypt_res,
329 struct ieee80211_rx_status *stats);
330void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
331int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
332 struct ieee80211_vif *vif);
333void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
334 struct ieee80211_vif *vif);
335int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
336 struct ieee80211_vif *vif,
337 enum nl80211_iftype newtype, bool newp2p);
338int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
339void iwl_legacy_txq_mem(struct iwl_priv *priv);
340
341#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
342int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
343void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
344void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
345void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
346 u16 length, struct ieee80211_hdr *header);
347void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
348 u16 length, struct ieee80211_hdr *header);
349const char *iwl_legacy_get_mgmt_string(int cmd);
350const char *iwl_legacy_get_ctrl_string(int cmd);
351void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
352void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
353 u16 len);
354#else
355static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
356{
357 return 0;
358}
359static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
360{
361}
362static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
363{
364}
365static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
366 u16 length, struct ieee80211_hdr *header)
367{
368}
369static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
370 u16 length, struct ieee80211_hdr *header)
371{
372}
373static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
374 __le16 fc, u16 len)
375{
376}
377#endif
378/*****************************************************
379 * RX handlers.
380 * **************************************************/
381void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
382 struct iwl_rx_mem_buffer *rxb);
383void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
384 struct iwl_rx_mem_buffer *rxb);
385void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
386 struct iwl_rx_mem_buffer *rxb);
387
388/*****************************************************
389* RX
390******************************************************/
391void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
392int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
393void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
394 struct iwl_rx_queue *q);
395int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
396void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
397 struct iwl_rx_mem_buffer *rxb);
398/* Handlers */
399void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
400 struct iwl_rx_mem_buffer *rxb);
401void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
402 struct iwl_rx_packet *pkt);
403void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
404void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
405
406/* TX helpers */
407
408/*****************************************************
409* TX
410******************************************************/
411void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
412 struct iwl_tx_queue *txq);
413int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
414 int slots_num, u32 txq_id);
415void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
416 struct iwl_tx_queue *txq,
417 int slots_num, u32 txq_id);
418void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
419void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
420/*****************************************************
421 * TX power
422 ****************************************************/
423int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
424
425/*******************************************************************************
426 * Rate
427 ******************************************************************************/
428
429u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
430 struct iwl_rxon_context *ctx);
431
432/*******************************************************************************
433 * Scanning
434 ******************************************************************************/
435void iwl_legacy_init_scan_params(struct iwl_priv *priv);
436int iwl_legacy_scan_cancel(struct iwl_priv *priv);
437int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
438void iwl_legacy_force_scan_end(struct iwl_priv *priv);
439int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
440 struct ieee80211_vif *vif,
441 struct cfg80211_scan_request *req);
442void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
443int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external);
444u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
445 struct ieee80211_mgmt *frame,
446 const u8 *ta, const u8 *ie, int ie_len, int left);
447void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
448u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
449 enum ieee80211_band band,
450 u8 n_probes);
451u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
452 enum ieee80211_band band,
453 struct ieee80211_vif *vif);
454void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
455void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
456
457/* For faster active scanning, scan will move to the next channel if fewer than
458 * PLCP_QUIET_THRESH packets are heard on this channel within
459 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
460 * time if it's a quiet channel (nothing responded to our probe, and there's
461 * no other traffic).
462 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
463#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
464#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
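A hedged sketch of how these two constants are typically dropped into the scan request command; the quiet_time/quiet_plcp_th field names are assumed from the scan command layout used by this driver family.

/* Illustrative only: wire the quiet-channel knobs into a scan command. */
static void iwl_example_set_quiet_params(struct iwl_scan_cmd *scan)
{
	/* Leave a quiet channel after at most 10 ms ... */
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
	/* ... unless at least this many packets were heard on it. */
	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
}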
465
466#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
467
468/*****************************************************
469 * S e n d i n g H o s t C o m m a n d s *
470 *****************************************************/
471
472const char *iwl_legacy_get_cmd_string(u8 cmd);
473int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
474 struct iwl_host_cmd *cmd);
475int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
476int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
477 u16 len, const void *data);
478int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
479 const void *data,
480 void (*callback)(struct iwl_priv *priv,
481 struct iwl_device_cmd *cmd,
482 struct iwl_rx_packet *pkt));
483
484int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
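The prototypes above cover the usual ways of talking to the firmware: a blocking command with an inline payload (send_cmd_pdu), the same thing asynchronously with an optional completion callback (send_cmd_pdu_async), and the lower-level send_cmd_sync/enqueue_hcmd pair. A minimal sketch of the synchronous path; the command ID and payload struct below are placeholders, not real host commands.

#define IWL_EXAMPLE_CMD		0xff	/* placeholder id for illustration */

struct iwl_example_cmd {
	__le32 flags;
} __packed;

/* Sketch: send a small payload and block until the firmware answers. */
static int iwl_example_send(struct iwl_priv *priv)
{
	struct iwl_example_cmd cmd = {
		.flags = cpu_to_le32(0),
	};

	return iwl_legacy_send_cmd_pdu(priv, IWL_EXAMPLE_CMD,
				       sizeof(cmd), &cmd);
}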
485
486
487/*****************************************************
488 * PCI *
489 *****************************************************/
490
491static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
492{
493 int pos;
494 u16 pci_lnk_ctl;
495 pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
496 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
497 return pci_lnk_ctl;
498}
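The helper above just returns the raw PCI Express Link Control word; a sketch of how a caller might act on it (in the PCIe spec, bits [1:0] of that word are the ASPM control field: bit 0 enables L0s, bit 1 enables L1), roughly what the APM setup code does:

/* Sketch: log the platform's ASPM choice. */
static void iwl_example_check_aspm(struct iwl_priv *priv)
{
	u16 lctl = iwl_legacy_pcie_link_ctl(priv);

	if (lctl & 0x02)	/* L1 enabled by the platform */
		IWL_DEBUG_POWER(priv, "L1 enabled, disabling L0S\n");
	else
		IWL_DEBUG_POWER(priv, "L1 disabled, enabling L0S\n");
}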
499
500void iwl_legacy_bg_watchdog(unsigned long data);
501u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
502 u32 usec, u32 beacon_interval);
503__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
504 u32 addon, u32 beacon_interval);
505
506#ifdef CONFIG_PM
507int iwl_legacy_pci_suspend(struct device *device);
508int iwl_legacy_pci_resume(struct device *device);
509extern const struct dev_pm_ops iwl_legacy_pm_ops;
510
511#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
512
513#else /* !CONFIG_PM */
514
515#define IWL_LEGACY_PM_OPS NULL
516
517#endif /* !CONFIG_PM */
518
519/*****************************************************
520* Error Handling Debugging
521******************************************************/
522void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
523int iwl4965_dump_nic_event_log(struct iwl_priv *priv,
524 bool full_log, char **buf, bool display);
525#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
526void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
527 struct iwl_rxon_context *ctx);
528#else
529static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
530 struct iwl_rxon_context *ctx)
531{
532}
533#endif
534
535void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
536
537/*****************************************************
538* GEOS
539******************************************************/
540int iwl_legacy_init_geos(struct iwl_priv *priv);
541void iwl_legacy_free_geos(struct iwl_priv *priv);
542
543/*************** DRIVER STATUS FUNCTIONS *****/
544
545#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
546/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
547#define STATUS_INT_ENABLED 2
548#define STATUS_RF_KILL_HW 3
549#define STATUS_CT_KILL 4
550#define STATUS_INIT 5
551#define STATUS_ALIVE 6
552#define STATUS_READY 7
553#define STATUS_TEMPERATURE 8
554#define STATUS_GEO_CONFIGURED 9
555#define STATUS_EXIT_PENDING 10
556#define STATUS_STATISTICS 12
557#define STATUS_SCANNING 13
558#define STATUS_SCAN_ABORTING 14
559#define STATUS_SCAN_HW 15
560#define STATUS_POWER_PMI 16
561#define STATUS_FW_ERROR 17
562
563
564static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
565{
566 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
567 * set but EXIT_PENDING is not */
568 return test_bit(STATUS_READY, &priv->status) &&
569 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
570 !test_bit(STATUS_EXIT_PENDING, &priv->status);
571}
572
573static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
574{
575 return test_bit(STATUS_ALIVE, &priv->status);
576}
577
578static inline int iwl_legacy_is_init(struct iwl_priv *priv)
579{
580 return test_bit(STATUS_INIT, &priv->status);
581}
582
583static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
584{
585 return test_bit(STATUS_RF_KILL_HW, &priv->status);
586}
587
588static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
589{
590 return iwl_legacy_is_rfkill_hw(priv);
591}
592
593static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
594{
595 return test_bit(STATUS_CT_KILL, &priv->status);
596}
597
598static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
599{
600
601 if (iwl_legacy_is_rfkill(priv))
602 return 0;
603
604 return iwl_legacy_is_ready(priv);
605}
606
607extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
608extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
609 u8 flags, bool clear);
610void iwl_legacy_apm_stop(struct iwl_priv *priv);
611int iwl_legacy_apm_init(struct iwl_priv *priv);
612
613int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
614 struct iwl_rxon_context *ctx);
615static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
616 struct iwl_rxon_context *ctx)
617{
618 return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
619}
620static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
621 struct iwl_rxon_context *ctx)
622{
623 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
624}
625static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
626 struct iwl_priv *priv, enum ieee80211_band band)
627{
628 return priv->hw->wiphy->bands[band];
629}
630
631extern bool bt_coex_active;
632
633/* mac80211 handlers */
634int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
635void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
636void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
637 struct ieee80211_vif *vif,
638 struct ieee80211_bss_conf *bss_conf,
639 u32 changes);
640void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
641 struct ieee80211_tx_info *info,
642 __le16 fc, __le32 *tx_flags);
643
644irqreturn_t iwl_legacy_isr(int irq, void *data);
645
646#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h
new file mode 100644
index 000000000000..668a9616c269
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-csr.h
@@ -0,0 +1,422 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_csr_h__
64#define __iwl_legacy_csr_h__
65/*
66 * CSR (control and status registers)
67 *
68 * CSR registers are mapped directly into PCI bus space, and are accessible
69 * whenever platform supplies power to device, even when device is in
70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 *
73 * Use iwl_write32() and iwl_read32() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use iwl_legacy_write_direct32() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers.
79 *
80 * NOTE: Device does need to be awake in order to read this memory
81 * via CSR_EEPROM register
82 */
83#define CSR_BASE (0x000)
84
85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
92#define CSR_GP_CNTRL (CSR_BASE+0x024)
93
94/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
96
97/*
98 * Hardware revision info
99 * Bit fields:
100 * 31-8: Reserved
101 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
103 * 1-0: "Dash" (-) value, as in A-1, etc.
104 *
105 * NOTE: Revision step affects calculation of CCK txpower for 4965.
106 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
107 */
108#define CSR_HW_REV (CSR_BASE+0x028)
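Tying the access rule and the bit layout together: CSR registers are read with the plain iwl_read32() family (no MAC wake-up needed), and the revision fields can then be masked out exactly as documented above. A sketch, assuming iwl_read32() takes priv and the register offset:

/* Sketch: decode CSR_HW_REV per the bit layout documented above. */
static void iwl_example_print_hw_rev(struct iwl_priv *priv)
{
	u32 hw_rev = iwl_read32(priv, CSR_HW_REV);
	u32 type = (hw_rev >> 4) & 0xF;		/* bits 7-4: device type */
	u32 step = (hw_rev >> 2) & 0x3;		/* bits 3-2: 0=A .. 3=D */
	u32 dash = hw_rev & 0x3;		/* bits 1-0: "dash" value */

	IWL_INFO(priv, "HW rev: type %u, step %c, dash %u\n",
		 type, "ABCD"[step], dash);
}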
109
110/*
111 * EEPROM memory reads
112 *
113 * NOTE: Device must be awake, initialized via apm_ops.init(),
114 * in order to read.
115 */
116#define CSR_EEPROM_REG (CSR_BASE+0x02c)
117#define CSR_EEPROM_GP (CSR_BASE+0x030)
118
119#define CSR_GIO_REG (CSR_BASE+0x03C)
120#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
121#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
122
123/*
124 * UCODE-DRIVER GP (general purpose) mailbox registers.
125 * SET/CLR registers set/clear bit(s) if "1" is written.
126 */
127#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
128#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
129#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
130#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
131
132#define CSR_LED_REG (CSR_BASE+0x094)
133#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
134
135/* GIO Chicken Bits (PCI Express bus link power management) */
136#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
137
138/* Analog phase-lock-loop configuration */
139#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
140
141/*
142 * CSR Hardware Revision Workaround Register. Indicates hardware rev;
143 * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
144 * See also CSR_HW_REV register.
145 * Bit fields:
146 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
147 * 1-0: "Dash" (-) value, as in C-1, etc.
148 */
149#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
150
151#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
152#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
153
154/* Bits for CSR_HW_IF_CONFIG_REG */
155#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
156#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
157#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
158#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
159
160#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
161#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
162#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
163#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
164#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
165#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
166
167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
172
173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
175
176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
177 * acknowledged (reset) by host writing "1" to flagged bits. */
178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
189
190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
191 CSR_INT_BIT_HW_ERR | \
192 CSR_INT_BIT_FH_TX | \
193 CSR_INT_BIT_SW_ERR | \
194 CSR_INT_BIT_RF_KILL | \
195 CSR_INT_BIT_SW_RX | \
196 CSR_INT_BIT_WAKEUP | \
197 CSR_INT_BIT_ALIVE)
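As the comment above says, INTA bits are write-one-to-clear, so a typical ISR snapshots CSR_INT and immediately writes the same value back to acknowledge exactly what it sampled. A minimal sketch (assuming the iwl_read32()/iwl_write32() helpers take priv first):

/* Sketch: read-and-acknowledge pattern for the INTA register. */
static u32 iwl_example_ack_interrupts(struct iwl_priv *priv)
{
	u32 inta = iwl_read32(priv, CSR_INT);

	/* Writing the sampled bits back clears only those flags. */
	iwl_write32(priv, CSR_INT, inta);

	if (inta & CSR_INT_BIT_HW_ERR)
		IWL_ERR(priv, "hardware error interrupt\n");

	return inta;
}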
198
199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
208
209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
210 CSR39_FH_INT_BIT_RX_CHNL2 | \
211 CSR_FH_INT_BIT_RX_CHNL1 | \
212 CSR_FH_INT_BIT_RX_CHNL0)
213
214
215#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
216 CSR_FH_INT_BIT_TX_CHNL1 | \
217 CSR_FH_INT_BIT_TX_CHNL0)
218
219#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
220 CSR_FH_INT_BIT_RX_CHNL1 | \
221 CSR_FH_INT_BIT_RX_CHNL0)
222
223#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
224 CSR_FH_INT_BIT_TX_CHNL0)
225
226/* GPIO */
227#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
228#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
229#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
230
231/* RESET */
232#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
233#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
234#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
235#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
236#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
237#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
238
239/*
240 * GP (general purpose) CONTROL REGISTER
241 * Bit fields:
242 * 27: HW_RF_KILL_SW
243 * Indicates state of (platform's) hardware RF-Kill switch
244 * 26-24: POWER_SAVE_TYPE
245 * Indicates current power-saving mode:
246 * 000 -- No power saving
247 * 001 -- MAC power-down
248 * 010 -- PHY (radio) power-down
249 * 011 -- Error
250 * 9-6: SYS_CONFIG
251 * Indicates current system configuration, reflecting pins on chip
252 * as forced high/low by device circuit board.
253 * 4: GOING_TO_SLEEP
254 * Indicates MAC is entering a power-saving sleep power-down.
255 * Not a good time to access device-internal resources.
256 * 3: MAC_ACCESS_REQ
257 * Host sets this to request and maintain MAC wakeup, to allow host
258 * access to device-internal resources. Host must wait for
259 * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
260 * device registers.
261 * 2: INIT_DONE
262 * Host sets this to put device into fully operational D0 power mode.
263 * Host resets this after SW_RESET to put device into low power mode.
264 * 0: MAC_CLOCK_READY
265 * Indicates MAC (ucode processor, etc.) is powered up and can run.
266 * Internal resources are accessible.
267 * NOTE: This does not indicate that the processor is actually running.
268 * NOTE: This does not indicate that 4965 or 3945 has completed
269 * init or post-power-down restore of internal SRAM memory.
270 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
271 * SRAM is restored and uCode is in normal operation mode.
272 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
273 * do not need to save/restore it.
274 * NOTE: After device reset, this bit remains "0" until host sets
275 * INIT_DONE
276 */
277#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
278#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
279#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
280#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
281
282#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
283
284#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
285#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
286#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
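The MAC_ACCESS_REQ / MAC_CLOCK_READY handshake described above is what the driver's "grab nic access" helpers implement: raise the request bit, poll until the clock-ready bit is up (and GOING_TO_SLEEP is down), do the non-CSR accesses, then drop the request bit again. A simplified open-coded sketch; the real code goes through its polling helpers rather than this loop:

/* Simplified sketch of the wake-the-MAC handshake (udelay from linux/delay.h). */
static int iwl_example_grab_nic_access(struct iwl_priv *priv)
{
	int t;

	iwl_write32(priv, CSR_GP_CNTRL,
		    iwl_read32(priv, CSR_GP_CNTRL) |
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	for (t = 0; t < 5000; t++) {
		u32 gp = iwl_read32(priv, CSR_GP_CNTRL);

		if ((gp & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) &&
		    !(gp & CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP))
			return 0;	/* MAC awake: HBUS access is now safe */
		udelay(10);
	}
	return -ETIMEDOUT;
}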
287
288
289/* EEPROM REG */
290#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
291#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
292#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
293#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
294
295/* EEPROM GP */
296#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
297#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
298#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
299#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
300
301/* GP REG */
302#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
303#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
304#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
305#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
306#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
307
308
309/* CSR GIO */
310#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
311
312/*
313 * UCODE-DRIVER GP (general purpose) mailbox register 1
314 * Host driver and uCode write and/or read this register to communicate with
315 * each other.
316 * Bit fields:
317 * 4: UCODE_DISABLE
318 * Host sets this to request permanent halt of uCode, same as
319 * sending CARD_STATE command with "halt" bit set.
320 * 3: CT_KILL_EXIT
321 * Host sets this to request exit from CT_KILL state, i.e. host thinks
322 * device temperature is low enough to continue normal operation.
323 * 2: CMD_BLOCKED
324 * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
325 * to release uCode to clear all Tx and command queues, enter
326 * unassociated mode, and power down.
327 * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
328 * 1: SW_BIT_RFKILL
329 * Host sets this when issuing CARD_STATE command to request
330 * device sleep.
331 * 0: MAC_SLEEP
332 * uCode sets this when preparing a power-saving power-down.
333 * uCode resets this when power-up is complete and SRAM is sane.
334 * NOTE: 3945/4965 saves internal SRAM data to host when powering down,
335 * and must restore this data after powering back up.
336 * MAC_SLEEP is the best indication that restore is complete.
337 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
338 * do not need to save/restore it.
339 */
340#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
341#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
342#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
343#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
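Because CSR_UCODE_DRV_GP1 has dedicated _SET/_CLR companions (see the mailbox register block earlier in this file), the driver never read-modify-writes it; writing a mask to the _SET or _CLR register flips just those bits. A short sketch of the CMD_BLOCKED usage described above (iwl_write32() argument order assumed):

/* Sketch: block/unblock host commands around an RF-kill transition. */
static void iwl_example_block_commands(struct iwl_priv *priv, bool block)
{
	if (block)
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
	else
		iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
}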
344
345/* GIO Chicken Bits (PCI Express bus link power management) */
346#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
347#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
348
349/* LED */
350#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
351#define CSR_LED_REG_TRUN_ON (0x78)
352#define CSR_LED_REG_TRUN_OFF (0x38)
353
354/* ANA_PLL */
355#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
356
357/* HPET MEM debug */
358#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
359
360/* DRAM INT TABLE */
361#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
362#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
363
364/*
365 * HBUS (Host-side Bus)
366 *
367 * HBUS registers are mapped directly into PCI bus space, but are used
368 * to indirectly access device's internal memory or registers that
369 * may be powered-down.
370 *
371 * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
372 * for these registers;
373 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
374 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
375 * internal resources.
376 *
377 * Do not use iwl_write32()/iwl_read32() family to access these registers;
378 * these provide only simple PCI bus access, without waking up the MAC.
379 */
380#define HBUS_BASE (0x400)
381
382/*
383 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
384 * structures, error log, event log, verifying uCode load).
385 * First write to address register, then read from or write to data register
386 * to complete the job. Once the address register is set up, accesses to
387 * data registers auto-increment the address by one dword.
388 * Bit usage for address registers (read or write):
389 * 0-31: memory address within device
390 */
391#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
392#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
393#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
394#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
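Connecting the HBUS description to the registers just defined: once the MAC has been woken via the CSR_GP_CNTRL handshake, the driver writes the SRAM address once and then pulls consecutive dwords out of the auto-incrementing data register. A sketch using the iwl_legacy_*_direct32() names from the comment; their exact prototypes are assumed here:

/* Sketch: dump a few dwords of device SRAM through the auto-increment window.
 * Caller must already hold MAC access (MAC_ACCESS_REQ handshake above). */
static void iwl_example_read_sram(struct iwl_priv *priv, u32 addr,
				  u32 *out, int dwords)
{
	int i;

	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
	for (i = 0; i < dwords; i++)
		out[i] = iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
}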
395
396/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
397#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
398#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
399
400/*
401 * Registers for accessing device's internal peripheral registers
402 * (e.g. SCD, BSM, etc.). First write to address register,
403 * then read from or write to data register to complete the job.
404 * Bit usage for address registers (read or write):
405 * 0-15: register address (offset) within device
406 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
407 */
408#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
409#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
410#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
411#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
412
413/*
414 * Per-Tx-queue write pointer (index, really!)
415 * Indicates index to next TFD that driver will fill (1 past latest filled).
416 * Bit usage:
417 * 0-7: queue write index
418 * 11-8: queue selector
419 */
420#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
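The write-pointer register packs the queue selector and the new write index into a single dword, per the bit layout above. A minimal sketch of the update; the real driver additionally handles the case where the MAC is asleep:

/* Sketch: tell the scheduler that TX queue txq_id is filled up to 'index'. */
static void iwl_example_txq_set_write_ptr(struct iwl_priv *priv,
					  int txq_id, u32 index)
{
	iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
				  (index & 0xff) | (txq_id << 8));
}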
421
422#endif /* !__iwl_legacy_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
new file mode 100644
index 000000000000..665789f3e75d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debug.h
@@ -0,0 +1,198 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_debug_h__
30#define __iwl_legacy_debug_h__
31
32struct iwl_priv;
33extern u32 iwl_debug_level;
34
35#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
36#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
39
40#define iwl_print_hex_error(priv, p, len) \
41do { \
42 print_hex_dump(KERN_ERR, "iwl data: ", \
43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
44} while (0)
45
46#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
47#define IWL_DEBUG(__priv, level, fmt, args...) \
48do { \
49 if (iwl_legacy_get_debug_level(__priv) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \
53} while (0)
54
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
56do { \
57 if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \
61} while (0)
62
63#define iwl_print_hex_dump(priv, level, p, len) \
64do { \
65 if (iwl_legacy_get_debug_level(priv) & level) \
66 print_hex_dump(KERN_DEBUG, "iwl data: ", \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0)
69
70#else
71#define IWL_DEBUG(__priv, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
73static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
74 const void *p, u32 len)
75{}
76#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
77
78#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
79int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
81#else
82static inline int
83iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
84{
85 return 0;
86}
87static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
88{
89}
90#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
91
92/*
93 * To use the debug system:
94 *
95 * If you are defining a new debug classification, simply add it to the #define
96 * list here in the form of
97 *
98 * #define IWL_DL_xxxx VALUE
99 *
100 * where xxxx should be the name of the classification (for example, WEP).
101 *
102 * You then need to either add an IWL_xxxx_DEBUG() macro definition for your
103 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
104 * to send output to that classification.
105 *
106 * The active debug levels can be accessed via files
107 *
108 * /sys/module/iwl4965/parameters/debug{50}
109 * /sys/module/iwl3945/parameters/debug
110 * /sys/class/net/wlan0/device/debug_level
111 *
112 * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
113 */
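Following the recipe above, a new classification is just two macro definitions plus call sites. The IWL_DL_EXAMPLE / IWL_DEBUG_EXAMPLE names below are placeholders for illustration; note that all 32 level bits are already assigned by the list that follows, so a real addition would have to reuse or free one.

/* Hypothetical classification -- name and bit value are placeholders. */
#define IWL_DL_EXAMPLE		(1 << 5)	/* would need an unused bit */
#define IWL_DEBUG_EXAMPLE(p, f, a...)	IWL_DEBUG(p, IWL_DL_EXAMPLE, f, ## a)

/* Typical call site:
 *	IWL_DEBUG_EXAMPLE(priv, "value = %d\n", value);
 */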
114
115/* 0x0000000F - 0x00000001 */
116#define IWL_DL_INFO (1 << 0)
117#define IWL_DL_MAC80211 (1 << 1)
118#define IWL_DL_HCMD (1 << 2)
119#define IWL_DL_STATE (1 << 3)
120/* 0x000000F0 - 0x00000010 */
121#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_HCMD_DUMP (1 << 5)
123#define IWL_DL_EEPROM (1 << 6)
124#define IWL_DL_RADIO (1 << 7)
125/* 0x00000F00 - 0x00000100 */
126#define IWL_DL_POWER (1 << 8)
127#define IWL_DL_TEMP (1 << 9)
128#define IWL_DL_NOTIF (1 << 10)
129#define IWL_DL_SCAN (1 << 11)
130/* 0x0000F000 - 0x00001000 */
131#define IWL_DL_ASSOC (1 << 12)
132#define IWL_DL_DROP (1 << 13)
133#define IWL_DL_TXPOWER (1 << 14)
134#define IWL_DL_AP (1 << 15)
135/* 0x000F0000 - 0x00010000 */
136#define IWL_DL_FW (1 << 16)
137#define IWL_DL_RF_KILL (1 << 17)
138#define IWL_DL_FW_ERRORS (1 << 18)
139#define IWL_DL_LED (1 << 19)
140/* 0x00F00000 - 0x00100000 */
141#define IWL_DL_RATE (1 << 20)
142#define IWL_DL_CALIB (1 << 21)
143#define IWL_DL_WEP (1 << 22)
144#define IWL_DL_TX (1 << 23)
145/* 0x0F000000 - 0x01000000 */
146#define IWL_DL_RX (1 << 24)
147#define IWL_DL_ISR (1 << 25)
148#define IWL_DL_HT (1 << 26)
149#define IWL_DL_IO (1 << 27)
150/* 0xF0000000 - 0x10000000 */
151#define IWL_DL_11H (1 << 28)
152#define IWL_DL_STATS (1 << 29)
153#define IWL_DL_TX_REPLY (1 << 30)
154#define IWL_DL_QOS (1 << 31)
155
156#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
157#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
158#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
159#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
160#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
161#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
162#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
163#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
164#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
165#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
166#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
168#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
169#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
170#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
171#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
172#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
173#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
174 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
175#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
176#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
177#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
178#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
179#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
180 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
181#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
182#define IWL_DEBUG_ASSOC(p, f, a...) \
183 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
184#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
185 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
186#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
187#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
188#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
189 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
190#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
191#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
192 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
193#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
194#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
195#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
196#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
197
198#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
new file mode 100644
index 000000000000..6ea9c4fbcd3a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
@@ -0,0 +1,1467 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <net/mac80211.h>
30
31
32#include "iwl-dev.h"
33#include "iwl-debug.h"
34#include "iwl-core.h"
35#include "iwl-io.h"
36
37/* create and remove of files */
38#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
39 if (!debugfs_create_file(#name, mode, parent, priv, \
40 &iwl_legacy_dbgfs_##name##_ops)) \
41 goto err; \
42} while (0)
43
44#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
45 struct dentry *__tmp; \
46 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
47 parent, ptr); \
48 if (IS_ERR(__tmp) || !__tmp) \
49 goto err; \
50} while (0)
51
52#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
53 struct dentry *__tmp; \
54 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
55 parent, ptr); \
56 if (IS_ERR(__tmp) || !__tmp) \
57 goto err; \
58} while (0)
59
60/* file operation */
61#define DEBUGFS_READ_FUNC(name) \
62static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
63 char __user *user_buf, \
64 size_t count, loff_t *ppos);
65
66#define DEBUGFS_WRITE_FUNC(name) \
67static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
68 const char __user *user_buf, \
69 size_t count, loff_t *ppos);
70
71
72static int
73iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
74{
75 file->private_data = inode->i_private;
76 return 0;
77}
78
79#define DEBUGFS_READ_FILE_OPS(name) \
80 DEBUGFS_READ_FUNC(name); \
81static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
82 .read = iwl_legacy_dbgfs_##name##_read, \
83 .open = iwl_legacy_dbgfs_open_file_generic, \
84 .llseek = generic_file_llseek, \
85};
86
87#define DEBUGFS_WRITE_FILE_OPS(name) \
88 DEBUGFS_WRITE_FUNC(name); \
89static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
90 .write = iwl_legacy_dbgfs_##name##_write, \
91 .open = iwl_legacy_dbgfs_open_file_generic, \
92 .llseek = generic_file_llseek, \
93};
94
95#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
96 DEBUGFS_READ_FUNC(name); \
97 DEBUGFS_WRITE_FUNC(name); \
98static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
99 .write = iwl_legacy_dbgfs_##name##_write, \
100 .read = iwl_legacy_dbgfs_##name##_read, \
101 .open = iwl_legacy_dbgfs_open_file_generic, \
102 .llseek = generic_file_llseek, \
103};
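To see how these helpers compose: DEBUGFS_READ_WRITE_FILE_OPS(foo) emits forward declarations for the read/write handlers plus an iwl_legacy_dbgfs_foo_ops file_operations table, and DEBUGFS_ADD_FILE(foo, dir, mode) later registers that table under the given dentry. A hedged sketch; the "example" entry and dir_data dentry are placeholders, and the handler bodies would still need to be written:

DEBUGFS_READ_WRITE_FILE_OPS(example);	/* declares ops + handler prototypes */

static int iwl_example_register_files(struct iwl_priv *priv,
				      struct dentry *dir_data)
{
	/* DEBUGFS_ADD_FILE() expects 'priv' in scope and a local err label. */
	DEBUGFS_ADD_FILE(example, dir_data, S_IWUSR | S_IRUSR);
	return 0;
err:
	return -ENOMEM;
}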
104
105static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
106 char __user *user_buf,
107 size_t count, loff_t *ppos) {
108
109 struct iwl_priv *priv = file->private_data;
110 char *buf;
111 int pos = 0;
112
113 int cnt;
114 ssize_t ret;
115 const size_t bufsz = 100 +
116 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
117 buf = kzalloc(bufsz, GFP_KERNEL);
118 if (!buf)
119 return -ENOMEM;
120 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
121 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
122 pos += scnprintf(buf + pos, bufsz - pos,
123 "\t%25s\t\t: %u\n",
124 iwl_legacy_get_mgmt_string(cnt),
125 priv->tx_stats.mgmt[cnt]);
126 }
127 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
128 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
129 pos += scnprintf(buf + pos, bufsz - pos,
130 "\t%25s\t\t: %u\n",
131 iwl_legacy_get_ctrl_string(cnt),
132 priv->tx_stats.ctrl[cnt]);
133 }
134 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
135 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
136 priv->tx_stats.data_cnt);
137 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
138 priv->tx_stats.data_bytes);
139 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
140 kfree(buf);
141 return ret;
142}
143
144static ssize_t
145iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
146 const char __user *user_buf,
147 size_t count, loff_t *ppos)
148{
149 struct iwl_priv *priv = file->private_data;
150 u32 clear_flag;
151 char buf[8];
152 int buf_size;
153
154 memset(buf, 0, sizeof(buf));
155 buf_size = min(count, sizeof(buf) - 1);
156 if (copy_from_user(buf, user_buf, buf_size))
157 return -EFAULT;
158 if (sscanf(buf, "%x", &clear_flag) != 1)
159 return -EFAULT;
160 iwl_legacy_clear_traffic_stats(priv);
161
162 return count;
163}
164
165static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
166 char __user *user_buf,
167 size_t count, loff_t *ppos) {
168
169 struct iwl_priv *priv = file->private_data;
170 char *buf;
171 int pos = 0;
172 int cnt;
173 ssize_t ret;
174 const size_t bufsz = 100 +
175 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
176 buf = kzalloc(bufsz, GFP_KERNEL);
177 if (!buf)
178 return -ENOMEM;
179
180 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
181 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
182 pos += scnprintf(buf + pos, bufsz - pos,
183 "\t%25s\t\t: %u\n",
184 iwl_legacy_get_mgmt_string(cnt),
185 priv->rx_stats.mgmt[cnt]);
186 }
187 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
188 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
189 pos += scnprintf(buf + pos, bufsz - pos,
190 "\t%25s\t\t: %u\n",
191 iwl_legacy_get_ctrl_string(cnt),
192 priv->rx_stats.ctrl[cnt]);
193 }
194 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
195 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
196 priv->rx_stats.data_cnt);
197 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
198 priv->rx_stats.data_bytes);
199
200 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
201 kfree(buf);
202 return ret;
203}
204
205#define BYTE1_MASK 0x000000ff
206#define BYTE2_MASK 0x0000ffff
207#define BYTE3_MASK 0x00ffffff
208static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
209 char __user *user_buf,
210 size_t count, loff_t *ppos)
211{
212 u32 val;
213 char *buf;
214 ssize_t ret;
215 int i;
216 int pos = 0;
217 struct iwl_priv *priv = file->private_data;
218 size_t bufsz;
219
220 /* default is to dump the entire data segment */
221 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
222 priv->dbgfs_sram_offset = 0x800000;
223 if (priv->ucode_type == UCODE_INIT)
224 priv->dbgfs_sram_len = priv->ucode_init_data.len;
225 else
226 priv->dbgfs_sram_len = priv->ucode_data.len;
227 }
228 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
229 buf = kmalloc(bufsz, GFP_KERNEL);
230 if (!buf)
231 return -ENOMEM;
232 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
233 priv->dbgfs_sram_len);
234 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
235 priv->dbgfs_sram_offset);
236 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
237 val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
238 priv->dbgfs_sram_len - i);
239 if (i < 4) {
240 switch (i) {
241 case 1:
242 val &= BYTE1_MASK;
243 break;
244 case 2:
245 val &= BYTE2_MASK;
246 break;
247 case 3:
248 val &= BYTE3_MASK;
249 break;
250 }
251 }
252 if (!(i % 16))
253 pos += scnprintf(buf + pos, bufsz - pos, "\n");
254 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
255 }
256 pos += scnprintf(buf + pos, bufsz - pos, "\n");
257
258 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
259 kfree(buf);
260 return ret;
261}
262
263static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
264 const char __user *user_buf,
265 size_t count, loff_t *ppos)
266{
267 struct iwl_priv *priv = file->private_data;
268 char buf[64];
269 int buf_size;
270 u32 offset, len;
271
272 memset(buf, 0, sizeof(buf));
273 buf_size = min(count, sizeof(buf) - 1);
274 if (copy_from_user(buf, user_buf, buf_size))
275 return -EFAULT;
276
277 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
278 priv->dbgfs_sram_offset = offset;
279 priv->dbgfs_sram_len = len;
280 } else {
281 priv->dbgfs_sram_offset = 0;
282 priv->dbgfs_sram_len = 0;
283 }
284
285 return count;
286}
287
288static ssize_t
289iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
290 size_t count, loff_t *ppos)
291{
292 struct iwl_priv *priv = file->private_data;
293 struct iwl_station_entry *station;
294 int max_sta = priv->hw_params.max_stations;
295 char *buf;
296 int i, j, pos = 0;
297 ssize_t ret;
298 /* Add 30 for initial string */
299 const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
300
301 buf = kmalloc(bufsz, GFP_KERNEL);
302 if (!buf)
303 return -ENOMEM;
304
305 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
306 priv->num_stations);
307
308 for (i = 0; i < max_sta; i++) {
309 station = &priv->stations[i];
310 if (!station->used)
311 continue;
312 pos += scnprintf(buf + pos, bufsz - pos,
313 "station %d - addr: %pM, flags: %#x\n",
314 i, station->sta.sta.addr,
315 station->sta.station_flags_msk);
316 pos += scnprintf(buf + pos, bufsz - pos,
317 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
318 pos += scnprintf(buf + pos, bufsz - pos,
319 "start_idx\tbitmap\t\t\trate_n_flags\n");
320
321 for (j = 0; j < MAX_TID_COUNT; j++) {
322 pos += scnprintf(buf + pos, bufsz - pos,
323 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
324 j, station->tid[j].seq_number,
325 station->tid[j].agg.txq_id,
326 station->tid[j].agg.frame_count,
327 station->tid[j].tfds_in_queue,
328 station->tid[j].agg.start_idx,
329 station->tid[j].agg.bitmap,
330 station->tid[j].agg.rate_n_flags);
331
332 if (station->tid[j].agg.wait_for_ba)
333 pos += scnprintf(buf + pos, bufsz - pos,
334 " - waitforba");
335 pos += scnprintf(buf + pos, bufsz - pos, "\n");
336 }
337
338 pos += scnprintf(buf + pos, bufsz - pos, "\n");
339 }
340
341 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
342 kfree(buf);
343 return ret;
344}
345
346static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
347 char __user *user_buf,
348 size_t count,
349 loff_t *ppos)
350{
351 ssize_t ret;
352 struct iwl_priv *priv = file->private_data;
353 int pos = 0, ofs = 0, buf_size = 0;
354 const u8 *ptr;
355 char *buf;
356 u16 eeprom_ver;
357 size_t eeprom_len = priv->cfg->base_params->eeprom_size;
358 buf_size = 4 * eeprom_len + 256;
359
360 if (eeprom_len % 16) {
361 IWL_ERR(priv, "NVM size is not multiple of 16.\n");
362 return -ENODATA;
363 }
364
365 ptr = priv->eeprom;
366 if (!ptr) {
367 IWL_ERR(priv, "Invalid EEPROM memory\n");
368 return -ENOMEM;
369 }
370
371 /* 4 characters for byte 0xYY */
372 buf = kzalloc(buf_size, GFP_KERNEL);
373 if (!buf) {
374 IWL_ERR(priv, "Can not allocate Buffer\n");
375 return -ENOMEM;
376 }
377 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
378 pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
379 "version: 0x%x\n", eeprom_ver);
380 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
381 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
382 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
383 buf_size - pos, 0);
384 pos += strlen(buf + pos);
385 if (buf_size - pos > 0)
386 buf[pos++] = '\n';
387 }
388
389 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
390 kfree(buf);
391 return ret;
392}
393
394static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file,
395 char __user *user_buf,
396 size_t count, loff_t *ppos)
397{
398 struct iwl_priv *priv = file->private_data;
399 char *buf;
400 int pos = 0;
401 ssize_t ret = -ENOMEM;
402
403 ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
404 priv, true, &buf, true);
405 if (buf) {
406 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
407 kfree(buf);
408 }
409 return ret;
410}
411
412static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file,
413 const char __user *user_buf,
414 size_t count, loff_t *ppos)
415{
416 struct iwl_priv *priv = file->private_data;
417 u32 event_log_flag;
418 char buf[8];
419 int buf_size;
420
421 memset(buf, 0, sizeof(buf));
422 buf_size = min(count, sizeof(buf) - 1);
423 if (copy_from_user(buf, user_buf, buf_size))
424 return -EFAULT;
425 if (sscanf(buf, "%d", &event_log_flag) != 1)
426 return -EFAULT;
427 if (event_log_flag == 1)
428 priv->cfg->ops->lib->dump_nic_event_log(priv, true,
429 NULL, false);
430
431 return count;
432}
433
434
435
436static ssize_t
437iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
438 size_t count, loff_t *ppos)
439{
440 struct iwl_priv *priv = file->private_data;
441 struct ieee80211_channel *channels = NULL;
442 const struct ieee80211_supported_band *supp_band = NULL;
443 int pos = 0, i, bufsz = PAGE_SIZE;
444 char *buf;
445 ssize_t ret;
446
447 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
448 return -EAGAIN;
449
450 buf = kzalloc(bufsz, GFP_KERNEL);
451 if (!buf) {
452 IWL_ERR(priv, "Can not allocate Buffer\n");
453 return -ENOMEM;
454 }
455
456 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
457 if (supp_band) {
458 channels = supp_band->channels;
459
460 pos += scnprintf(buf + pos, bufsz - pos,
461 "Displaying %d channels in 2.4GHz band (802.11bg):\n",
462 supp_band->n_channels);
463
464 for (i = 0; i < supp_band->n_channels; i++)
465 pos += scnprintf(buf + pos, bufsz - pos,
466 "%d: %ddBm: BSS%s%s, %s.\n",
467 channels[i].hw_value,
468 channels[i].max_power,
469 channels[i].flags & IEEE80211_CHAN_RADAR ?
470 " (IEEE 802.11h required)" : "",
471 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
472 || (channels[i].flags &
473 IEEE80211_CHAN_RADAR)) ? "" :
474 ", IBSS",
475 channels[i].flags &
476 IEEE80211_CHAN_PASSIVE_SCAN ?
477 "passive only" : "active/passive");
478 }
479 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
480 if (supp_band) {
481 channels = supp_band->channels;
482
483 pos += scnprintf(buf + pos, bufsz - pos,
484 "Displaying %d channels in 5.2GHz band (802.11a)\n",
485 supp_band->n_channels);
486
487 for (i = 0; i < supp_band->n_channels; i++)
488 pos += scnprintf(buf + pos, bufsz - pos,
489 "%d: %ddBm: BSS%s%s, %s.\n",
490 channels[i].hw_value,
491 channels[i].max_power,
492 channels[i].flags & IEEE80211_CHAN_RADAR ?
493 " (IEEE 802.11h required)" : "",
494 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
495 || (channels[i].flags &
496 IEEE80211_CHAN_RADAR)) ? "" :
497 ", IBSS",
498 channels[i].flags &
499 IEEE80211_CHAN_PASSIVE_SCAN ?
500 "passive only" : "active/passive");
501 }
502 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
503 kfree(buf);
504 return ret;
505}
506
507static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
508 char __user *user_buf,
509 size_t count, loff_t *ppos) {
510
511 struct iwl_priv *priv = file->private_data;
512 char buf[512];
513 int pos = 0;
514 const size_t bufsz = sizeof(buf);
515
516 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
517 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
518 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
519 test_bit(STATUS_INT_ENABLED, &priv->status));
520 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
521 test_bit(STATUS_RF_KILL_HW, &priv->status));
522 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
523 test_bit(STATUS_CT_KILL, &priv->status));
524 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
525 test_bit(STATUS_INIT, &priv->status));
526 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
527 test_bit(STATUS_ALIVE, &priv->status));
528 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
529 test_bit(STATUS_READY, &priv->status));
530 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
531 test_bit(STATUS_TEMPERATURE, &priv->status));
532 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
533 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
534 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
535 test_bit(STATUS_EXIT_PENDING, &priv->status));
536 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
537 test_bit(STATUS_STATISTICS, &priv->status));
538 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
539 test_bit(STATUS_SCANNING, &priv->status));
540 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
541 test_bit(STATUS_SCAN_ABORTING, &priv->status));
542 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
543 test_bit(STATUS_SCAN_HW, &priv->status));
544 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
545 test_bit(STATUS_POWER_PMI, &priv->status));
546 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
547 test_bit(STATUS_FW_ERROR, &priv->status));
548 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
549}
550
551static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
552 char __user *user_buf,
553 size_t count, loff_t *ppos) {
554
555 struct iwl_priv *priv = file->private_data;
556 int pos = 0;
557 int cnt = 0;
558 char *buf;
559 int bufsz = 24 * 64; /* 24 items * 64 char per item */
560 ssize_t ret;
561
562 buf = kzalloc(bufsz, GFP_KERNEL);
563 if (!buf) {
564 IWL_ERR(priv, "Cannot allocate buffer\n");
565 return -ENOMEM;
566 }
567
568 pos += scnprintf(buf + pos, bufsz - pos,
569 "Interrupt Statistics Report:\n");
570
571 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
572 priv->isr_stats.hw);
573 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
574 priv->isr_stats.sw);
575 if (priv->isr_stats.sw || priv->isr_stats.hw) {
576 pos += scnprintf(buf + pos, bufsz - pos,
577 "\tLast Restarting Code: 0x%X\n",
578 priv->isr_stats.err_code);
579 }
580#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
581 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
582 priv->isr_stats.sch);
583 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
584 priv->isr_stats.alive);
585#endif
586 pos += scnprintf(buf + pos, bufsz - pos,
587 "HW RF KILL switch toggled:\t %u\n",
588 priv->isr_stats.rfkill);
589
590 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
591 priv->isr_stats.ctkill);
592
593 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
594 priv->isr_stats.wakeup);
595
596 pos += scnprintf(buf + pos, bufsz - pos,
597 "Rx command responses:\t\t %u\n",
598 priv->isr_stats.rx);
599 for (cnt = 0; cnt < REPLY_MAX; cnt++) {
600 if (priv->isr_stats.rx_handlers[cnt] > 0)
601 pos += scnprintf(buf + pos, bufsz - pos,
602 "\tRx handler[%36s]:\t\t %u\n",
603 iwl_legacy_get_cmd_string(cnt),
604 priv->isr_stats.rx_handlers[cnt]);
605 }
606
607 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
608 priv->isr_stats.tx);
609
610 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
611 priv->isr_stats.unhandled);
612
613 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
614 kfree(buf);
615 return ret;
616}
617
618static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
619 const char __user *user_buf,
620 size_t count, loff_t *ppos)
621{
622 struct iwl_priv *priv = file->private_data;
623 char buf[8];
624 int buf_size;
625 u32 reset_flag;
626
627 memset(buf, 0, sizeof(buf));
628 buf_size = min(count, sizeof(buf) - 1);
629 if (copy_from_user(buf, user_buf, buf_size))
630 return -EFAULT;
631 if (sscanf(buf, "%x", &reset_flag) != 1)
632 return -EFAULT;
633 if (reset_flag == 0)
634 iwl_legacy_clear_isr_stats(priv);
635
636 return count;
637}
638
639static ssize_t
640iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
641 size_t count, loff_t *ppos)
642{
643 struct iwl_priv *priv = file->private_data;
644 struct iwl_rxon_context *ctx;
645 int pos = 0, i;
646 char buf[256 * NUM_IWL_RXON_CTX];
647 const size_t bufsz = sizeof(buf);
648
649 for_each_context(priv, ctx) {
650 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
651 ctx->ctxid);
652 for (i = 0; i < AC_NUM; i++) {
653 pos += scnprintf(buf + pos, bufsz - pos,
654 "\tcw_min\tcw_max\taifsn\ttxop\n");
655 pos += scnprintf(buf + pos, bufsz - pos,
656 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
657 ctx->qos_data.def_qos_parm.ac[i].cw_min,
658 ctx->qos_data.def_qos_parm.ac[i].cw_max,
659 ctx->qos_data.def_qos_parm.ac[i].aifsn,
660 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
661 }
662 pos += scnprintf(buf + pos, bufsz - pos, "\n");
663 }
664 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
665}
666
667static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
668 const char __user *user_buf,
669 size_t count, loff_t *ppos)
670{
671 struct iwl_priv *priv = file->private_data;
672 char buf[8];
673 int buf_size;
674 int ht40;
675
676 memset(buf, 0, sizeof(buf));
677 buf_size = min(count, sizeof(buf) - 1);
678 if (copy_from_user(buf, user_buf, buf_size))
679 return -EFAULT;
680 if (sscanf(buf, "%d", &ht40) != 1)
681 return -EFAULT;
682 if (!iwl_legacy_is_any_associated(priv))
683 priv->disable_ht40 = ht40 ? true : false;
684 else {
685 IWL_ERR(priv, "Sta associated with AP - "
686 "Change to 40MHz channel support is not allowed\n");
687 return -EINVAL;
688 }
689
690 return count;
691}
692
693static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
694 char __user *user_buf,
695 size_t count, loff_t *ppos)
696{
697 struct iwl_priv *priv = file->private_data;
698 char buf[100];
699 int pos = 0;
700 const size_t bufsz = sizeof(buf);
701
702 pos += scnprintf(buf + pos, bufsz - pos,
703 "11n 40MHz Mode: %s\n",
704 priv->disable_ht40 ? "Disabled" : "Enabled");
705 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
706}
707
708DEBUGFS_READ_WRITE_FILE_OPS(sram);
709DEBUGFS_READ_WRITE_FILE_OPS(log_event);
710DEBUGFS_READ_FILE_OPS(nvm);
711DEBUGFS_READ_FILE_OPS(stations);
712DEBUGFS_READ_FILE_OPS(channels);
713DEBUGFS_READ_FILE_OPS(status);
714DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
715DEBUGFS_READ_FILE_OPS(qos);
716DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
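/*
 * The DEBUGFS_READ_FILE_OPS()/DEBUGFS_WRITE_FILE_OPS()/
 * DEBUGFS_READ_WRITE_FILE_OPS() helpers are defined earlier in this file;
 * conceptually each one emits a struct file_operations wiring up the
 * matching iwl_legacy_dbgfs_<name>_read/_write handlers above, roughly
 * along these lines (a sketch only -- the exact open/llseek helpers are
 * the generic ones defined earlier in this file):
 *
 *	static const struct file_operations iwl_legacy_dbgfs_qos_ops = {
 *		.read = iwl_legacy_dbgfs_qos_read,
 *		.open = <generic debugfs open helper>,
 *		.llseek = generic_file_llseek,
 *	};
 *
 * DEBUGFS_ADD_FILE() then attaches these ops to the debugfs dentries
 * created in iwl_legacy_dbgfs_register() below.
 */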
717
718static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
719 char __user *user_buf,
720 size_t count, loff_t *ppos)
721{
722 struct iwl_priv *priv = file->private_data;
723 int pos = 0, ofs = 0;
724 int cnt = 0, entry;
725 struct iwl_tx_queue *txq;
726 struct iwl_queue *q;
727 struct iwl_rx_queue *rxq = &priv->rxq;
728 char *buf;
729 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
730 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
731 const u8 *ptr;
732 ssize_t ret;
733
734 if (!priv->txq) {
735 IWL_ERR(priv, "txq not ready\n");
736 return -EAGAIN;
737 }
738 buf = kzalloc(bufsz, GFP_KERNEL);
739 if (!buf) {
740 IWL_ERR(priv, "Cannot allocate buffer\n");
741 return -ENOMEM;
742 }
743 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
744 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
745 txq = &priv->txq[cnt];
746 q = &txq->q;
747 pos += scnprintf(buf + pos, bufsz - pos,
748 "q[%d]: read_ptr: %u, write_ptr: %u\n",
749 cnt, q->read_ptr, q->write_ptr);
750 }
751 if (priv->tx_traffic && (iwl_debug_level & IWL_DL_TX)) {
752 ptr = priv->tx_traffic;
753 pos += scnprintf(buf + pos, bufsz - pos,
754 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
755 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
756 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
757 entry++, ofs += 16) {
758 pos += scnprintf(buf + pos, bufsz - pos,
759 "0x%.4x ", ofs);
760 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
761 buf + pos, bufsz - pos, 0);
762 pos += strlen(buf + pos);
763 if (bufsz - pos > 0)
764 buf[pos++] = '\n';
765 }
766 }
767 }
768
769 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
770 pos += scnprintf(buf + pos, bufsz - pos,
771 "read: %u, write: %u\n",
772 rxq->read, rxq->write);
773
774 if (priv->rx_traffic && (iwl_debug_level & IWL_DL_RX)) {
775 ptr = priv->rx_traffic;
776 pos += scnprintf(buf + pos, bufsz - pos,
777 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
778 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
779 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
780 entry++, ofs += 16) {
781 pos += scnprintf(buf + pos, bufsz - pos,
782 "0x%.4x ", ofs);
783 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
784 buf + pos, bufsz - pos, 0);
785 pos += strlen(buf + pos);
786 if (bufsz - pos > 0)
787 buf[pos++] = '\n';
788 }
789 }
790 }
791
792 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
793 kfree(buf);
794 return ret;
795}
796
797static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
798 const char __user *user_buf,
799 size_t count, loff_t *ppos)
800{
801 struct iwl_priv *priv = file->private_data;
802 char buf[8];
803 int buf_size;
804 int traffic_log;
805
806 memset(buf, 0, sizeof(buf));
807 buf_size = min(count, sizeof(buf) - 1);
808 if (copy_from_user(buf, user_buf, buf_size))
809 return -EFAULT;
810 if (sscanf(buf, "%d", &traffic_log) != 1)
811 return -EFAULT;
812 if (traffic_log == 0)
813 iwl_legacy_reset_traffic_log(priv);
814
815 return count;
816}
817
818static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
819 char __user *user_buf,
820 size_t count, loff_t *ppos) {
821
822 struct iwl_priv *priv = file->private_data;
823 struct iwl_tx_queue *txq;
824 struct iwl_queue *q;
825 char *buf;
826 int pos = 0;
827 int cnt;
828 int ret;
829 const size_t bufsz = sizeof(char) * 64 *
830 priv->cfg->base_params->num_of_queues;
831
832 if (!priv->txq) {
833 IWL_ERR(priv, "txq not ready\n");
834 return -EAGAIN;
835 }
836 buf = kzalloc(bufsz, GFP_KERNEL);
837 if (!buf)
838 return -ENOMEM;
839
840 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
841 txq = &priv->txq[cnt];
842 q = &txq->q;
843 pos += scnprintf(buf + pos, bufsz - pos,
844 "hwq %.2d: read=%u write=%u stop=%d"
845 " swq_id=%#.2x (ac %d/hwq %d)\n",
846 cnt, q->read_ptr, q->write_ptr,
847 !!test_bit(cnt, priv->queue_stopped),
848 txq->swq_id, txq->swq_id & 3,
849 (txq->swq_id >> 2) & 0x1f);
850 if (cnt >= 4)
851 continue;
852 /* for the ACs, display the stop count too */
853 pos += scnprintf(buf + pos, bufsz - pos,
854 " stop-count: %d\n",
855 atomic_read(&priv->queue_stop_count[cnt]));
856 }
857 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
858 kfree(buf);
859 return ret;
860}
861
862static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
863 char __user *user_buf,
864 size_t count, loff_t *ppos) {
865
866 struct iwl_priv *priv = file->private_data;
867 struct iwl_rx_queue *rxq = &priv->rxq;
868 char buf[256];
869 int pos = 0;
870 const size_t bufsz = sizeof(buf);
871
872 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
873 rxq->read);
874 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
875 rxq->write);
876 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
877 rxq->free_count);
878 if (rxq->rb_stts) {
879 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
880 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
881 } else {
882 pos += scnprintf(buf + pos, bufsz - pos,
883 "closed_rb_num: Not Allocated\n");
884 }
885 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
886}
887
888static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
889 char __user *user_buf,
890 size_t count, loff_t *ppos)
891{
892 struct iwl_priv *priv = file->private_data;
893 return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
894 user_buf, count, ppos);
895}
896
897static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
898 char __user *user_buf,
899 size_t count, loff_t *ppos)
900{
901 struct iwl_priv *priv = file->private_data;
902 return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
903 user_buf, count, ppos);
904}
905
906static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
907 char __user *user_buf,
908 size_t count, loff_t *ppos)
909{
910 struct iwl_priv *priv = file->private_data;
911 return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
912 user_buf, count, ppos);
913}
914
915static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
916 char __user *user_buf,
917 size_t count, loff_t *ppos) {
918
919 struct iwl_priv *priv = file->private_data;
920 int pos = 0;
921 int cnt = 0;
922 char *buf;
923 int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
924 ssize_t ret;
925 struct iwl_sensitivity_data *data;
926
927 data = &priv->sensitivity_data;
928 buf = kzalloc(bufsz, GFP_KERNEL);
929 if (!buf) {
930 IWL_ERR(priv, "Cannot allocate buffer\n");
931 return -ENOMEM;
932 }
933
934 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
935 data->auto_corr_ofdm);
936 pos += scnprintf(buf + pos, bufsz - pos,
937 "auto_corr_ofdm_mrc:\t\t %u\n",
938 data->auto_corr_ofdm_mrc);
939 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
940 data->auto_corr_ofdm_x1);
941 pos += scnprintf(buf + pos, bufsz - pos,
942 "auto_corr_ofdm_mrc_x1:\t\t %u\n",
943 data->auto_corr_ofdm_mrc_x1);
944 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
945 data->auto_corr_cck);
946 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
947 data->auto_corr_cck_mrc);
948 pos += scnprintf(buf + pos, bufsz - pos,
949 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
950 data->last_bad_plcp_cnt_ofdm);
951 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
952 data->last_fa_cnt_ofdm);
953 pos += scnprintf(buf + pos, bufsz - pos,
954 "last_bad_plcp_cnt_cck:\t\t %u\n",
955 data->last_bad_plcp_cnt_cck);
956 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
957 data->last_fa_cnt_cck);
958 pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
959 data->nrg_curr_state);
960 pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
961 data->nrg_prev_state);
962 pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
963 for (cnt = 0; cnt < 10; cnt++) {
964 pos += scnprintf(buf + pos, bufsz - pos, " %u",
965 data->nrg_value[cnt]);
966 }
967 pos += scnprintf(buf + pos, bufsz - pos, "\n");
968 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
969 for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
970 pos += scnprintf(buf + pos, bufsz - pos, " %u",
971 data->nrg_silence_rssi[cnt]);
972 }
973 pos += scnprintf(buf + pos, bufsz - pos, "\n");
974 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
975 data->nrg_silence_ref);
976 pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
977 data->nrg_energy_idx);
978 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
979 data->nrg_silence_idx);
980 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
981 data->nrg_th_cck);
982 pos += scnprintf(buf + pos, bufsz - pos,
983 "nrg_auto_corr_silence_diff:\t %u\n",
984 data->nrg_auto_corr_silence_diff);
985 pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
986 data->num_in_cck_no_fa);
987 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
988 data->nrg_th_ofdm);
989
990 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
991 kfree(buf);
992 return ret;
993}
994
995
996static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
997 char __user *user_buf,
998 size_t count, loff_t *ppos) {
999
1000 struct iwl_priv *priv = file->private_data;
1001 int pos = 0;
1002 int cnt = 0;
1003 char *buf;
1004 int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
1005 ssize_t ret;
1006 struct iwl_chain_noise_data *data;
1007
1008 data = &priv->chain_noise_data;
1009 buf = kzalloc(bufsz, GFP_KERNEL);
1010 if (!buf) {
1011 IWL_ERR(priv, "Cannot allocate buffer\n");
1012 return -ENOMEM;
1013 }
1014
1015 pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
1016 data->active_chains);
1017 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
1018 data->chain_noise_a);
1019 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
1020 data->chain_noise_b);
1021 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
1022 data->chain_noise_c);
1023 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
1024 data->chain_signal_a);
1025 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
1026 data->chain_signal_b);
1027 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
1028 data->chain_signal_c);
1029 pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
1030 data->beacon_count);
1031
1032 pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
1033 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
1034 pos += scnprintf(buf + pos, bufsz - pos, " %u",
1035 data->disconn_array[cnt]);
1036 }
1037 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1038 pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
1039 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
1040 pos += scnprintf(buf + pos, bufsz - pos, " %u",
1041 data->delta_gain_code[cnt]);
1042 }
1043 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1044 pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
1045 data->radio_write);
1046 pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
1047 data->state);
1048
1049 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1050 kfree(buf);
1051 return ret;
1052}
1053
1054static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
1055 char __user *user_buf,
1056 size_t count, loff_t *ppos)
1057{
1058 struct iwl_priv *priv = file->private_data;
1059 char buf[60];
1060 int pos = 0;
1061 const size_t bufsz = sizeof(buf);
1062 u32 pwrsave_status;
1063
1064 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1065 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1066
1067 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1068 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1069 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1070 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1071 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1072 "error");
1073
1074 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1075}
1076
1077static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
1078 const char __user *user_buf,
1079 size_t count, loff_t *ppos)
1080{
1081 struct iwl_priv *priv = file->private_data;
1082 char buf[8];
1083 int buf_size;
1084 int clear;
1085
1086 memset(buf, 0, sizeof(buf));
1087 buf_size = min(count, sizeof(buf) - 1);
1088 if (copy_from_user(buf, user_buf, buf_size))
1089 return -EFAULT;
1090 if (sscanf(buf, "%d", &clear) != 1)
1091 return -EFAULT;
1092
1093 /* make request to uCode to retrieve statistics information */
1094 mutex_lock(&priv->mutex);
1095 iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
1096 mutex_unlock(&priv->mutex);
1097
1098 return count;
1099}
1100
1101static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file,
1102 char __user *user_buf,
1103 size_t count, loff_t *ppos) {
1104
1105 struct iwl_priv *priv = file->private_data;
1106 int pos = 0;
1107 char buf[128];
1108 const size_t bufsz = sizeof(buf);
1109
1110 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
1111 priv->event_log.ucode_trace ? "On" : "Off");
1112 pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
1113 priv->event_log.non_wraps_count);
1114 pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
1115 priv->event_log.wraps_once_count);
1116 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
1117 priv->event_log.wraps_more_count);
1118
1119 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1120}
1121
1122static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file,
1123 const char __user *user_buf,
1124 size_t count, loff_t *ppos)
1125{
1126 struct iwl_priv *priv = file->private_data;
1127 char buf[8];
1128 int buf_size;
1129 int trace;
1130
1131 memset(buf, 0, sizeof(buf));
1132 buf_size = min(count, sizeof(buf) - 1);
1133 if (copy_from_user(buf, user_buf, buf_size))
1134 return -EFAULT;
1135 if (sscanf(buf, "%d", &trace) != 1)
1136 return -EFAULT;
1137
1138 if (trace) {
1139 priv->event_log.ucode_trace = true;
1140 /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
1141 mod_timer(&priv->ucode_trace,
1142 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
1143 } else {
1144 priv->event_log.ucode_trace = false;
1145 del_timer_sync(&priv->ucode_trace);
1146 }
1147
1148 return count;
1149}
1150
1151static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
1152 char __user *user_buf,
1153 size_t count, loff_t *ppos) {
1154
1155 struct iwl_priv *priv = file->private_data;
1156 int len = 0;
1157 char buf[20];
1158
1159 len = sprintf(buf, "0x%04X\n",
1160 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1161 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1162}
1163
1164static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
1165 char __user *user_buf,
1166 size_t count, loff_t *ppos) {
1167
1168 struct iwl_priv *priv = file->private_data;
1169 int len = 0;
1170 char buf[20];
1171
1172 len = sprintf(buf, "0x%04X\n",
1173 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1174 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1175}
1176
1177static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
1178 char __user *user_buf,
1179 size_t count, loff_t *ppos)
1180{
1181 struct iwl_priv *priv = file->private_data;
1182 char *buf;
1183 int pos = 0;
1184 ssize_t ret = -EFAULT;
1185
1186 if (priv->cfg->ops->lib->dump_fh) {
1187 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
1188 if (buf) {
1189 ret = simple_read_from_buffer(user_buf,
1190 count, ppos, buf, pos);
1191 kfree(buf);
1192 }
1193 }
1194
1195 return ret;
1196}
1197
1198static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
1199 char __user *user_buf,
1200 size_t count, loff_t *ppos) {
1201
1202 struct iwl_priv *priv = file->private_data;
1203 int pos = 0;
1204 char buf[12];
1205 const size_t bufsz = sizeof(buf);
1206
1207 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1208 priv->missed_beacon_threshold);
1209
1210 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1211}
1212
1213static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
1214 const char __user *user_buf,
1215 size_t count, loff_t *ppos)
1216{
1217 struct iwl_priv *priv = file->private_data;
1218 char buf[8];
1219 int buf_size;
1220 int missed;
1221
1222 memset(buf, 0, sizeof(buf));
1223 buf_size = min(count, sizeof(buf) - 1);
1224 if (copy_from_user(buf, user_buf, buf_size))
1225 return -EFAULT;
1226 if (sscanf(buf, "%d", &missed) != 1)
1227 return -EINVAL;
1228
1229 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
1230 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
1231 priv->missed_beacon_threshold =
1232 IWL_MISSED_BEACON_THRESHOLD_DEF;
1233 else
1234 priv->missed_beacon_threshold = missed;
1235
1236 return count;
1237}
1238
1239static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file,
1240 char __user *user_buf,
1241 size_t count, loff_t *ppos) {
1242
1243 struct iwl_priv *priv = file->private_data;
1244 int pos = 0;
1245 char buf[12];
1246 const size_t bufsz = sizeof(buf);
1247
1248 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
1249 priv->cfg->base_params->plcp_delta_threshold);
1250
1251 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1252}
1253
1254static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file,
1255 const char __user *user_buf,
1256 size_t count, loff_t *ppos) {
1257
1258 struct iwl_priv *priv = file->private_data;
1259 char buf[8];
1260 int buf_size;
1261 int plcp;
1262
1263 memset(buf, 0, sizeof(buf));
1264 buf_size = min(count, sizeof(buf) - 1);
1265 if (copy_from_user(buf, user_buf, buf_size))
1266 return -EFAULT;
1267 if (sscanf(buf, "%d", &plcp) != 1)
1268 return -EINVAL;
1269 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
1270 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
1271 priv->cfg->base_params->plcp_delta_threshold =
1272 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
1273 else
1274 priv->cfg->base_params->plcp_delta_threshold = plcp;
1275 return count;
1276}
1277
1278static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
1279 char __user *user_buf,
1280 size_t count, loff_t *ppos) {
1281
1282 struct iwl_priv *priv = file->private_data;
1283 int i, pos = 0;
1284 char buf[300];
1285 const size_t bufsz = sizeof(buf);
1286 struct iwl_force_reset *force_reset;
1287
1288 for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
1289 force_reset = &priv->force_reset[i];
1290 pos += scnprintf(buf + pos, bufsz - pos,
1291 "Force reset method %d\n", i);
1292 pos += scnprintf(buf + pos, bufsz - pos,
1293 "\tnumber of reset request: %d\n",
1294 force_reset->reset_request_count);
1295 pos += scnprintf(buf + pos, bufsz - pos,
1296 "\tnumber of reset request success: %d\n",
1297 force_reset->reset_success_count);
1298 pos += scnprintf(buf + pos, bufsz - pos,
1299 "\tnumber of reset request reject: %d\n",
1300 force_reset->reset_reject_count);
1301 pos += scnprintf(buf + pos, bufsz - pos,
1302 "\treset duration: %lu\n",
1303 force_reset->reset_duration);
1304 }
1305 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1306}
1307
1308static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
1309 const char __user *user_buf,
1310 size_t count, loff_t *ppos) {
1311
1312 struct iwl_priv *priv = file->private_data;
1313 char buf[8];
1314 int buf_size;
1315 int reset, ret;
1316
1317 memset(buf, 0, sizeof(buf));
1318 buf_size = min(count, sizeof(buf) - 1);
1319 if (copy_from_user(buf, user_buf, buf_size))
1320 return -EFAULT;
1321 if (sscanf(buf, "%d", &reset) != 1)
1322 return -EINVAL;
1323 switch (reset) {
1324 case IWL_RF_RESET:
1325 case IWL_FW_RESET:
1326 ret = iwl_legacy_force_reset(priv, reset, true);
1327 break;
1328 default:
1329 return -EINVAL;
1330 }
1331 return ret ? ret : count;
1332}
1333
1334static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
1335 const char __user *user_buf,
1336 size_t count, loff_t *ppos) {
1337
1338 struct iwl_priv *priv = file->private_data;
1339 char buf[8];
1340 int buf_size;
1341 int timeout;
1342
1343 memset(buf, 0, sizeof(buf));
1344 buf_size = min(count, sizeof(buf) - 1);
1345 if (copy_from_user(buf, user_buf, buf_size))
1346 return -EFAULT;
1347 if (sscanf(buf, "%d", &timeout) != 1)
1348 return -EINVAL;
1349 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
1350 timeout = IWL_DEF_WD_TIMEOUT;
1351
1352 priv->cfg->base_params->wd_timeout = timeout;
1353 iwl_legacy_setup_watchdog(priv);
1354 return count;
1355}
1356
1357DEBUGFS_READ_FILE_OPS(rx_statistics);
1358DEBUGFS_READ_FILE_OPS(tx_statistics);
1359DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
1360DEBUGFS_READ_FILE_OPS(rx_queue);
1361DEBUGFS_READ_FILE_OPS(tx_queue);
1362DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
1363DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
1364DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1365DEBUGFS_READ_FILE_OPS(sensitivity);
1366DEBUGFS_READ_FILE_OPS(chain_noise);
1367DEBUGFS_READ_FILE_OPS(power_save_status);
1368DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
1369DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
1370DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
1371DEBUGFS_READ_FILE_OPS(fh_reg);
1372DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
1373DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
1374DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1375DEBUGFS_READ_FILE_OPS(rxon_flags);
1376DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1377DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1378
1379/*
1380 * Create the debugfs files and directories
1381 *
1382 */
1383int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
1384{
1385 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
1386 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1387
1388 dir_drv = debugfs_create_dir(name, phyd);
1389 if (!dir_drv)
1390 return -ENOMEM;
1391
1392 priv->debugfs_dir = dir_drv;
1393
1394 dir_data = debugfs_create_dir("data", dir_drv);
1395 if (!dir_data)
1396 goto err;
1397 dir_rf = debugfs_create_dir("rf", dir_drv);
1398 if (!dir_rf)
1399 goto err;
1400 dir_debug = debugfs_create_dir("debug", dir_drv);
1401 if (!dir_debug)
1402 goto err;
1403
1404 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1405 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1406 DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
1407 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1408 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1409 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1410 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1411 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1412 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1413 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
1414 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
1415 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1416 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1417 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1418 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1419 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
1420 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
1421 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1422 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1423 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
1424 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1425 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1426 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1427 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1428
1429 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1430 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1431 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1432 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1433 if (priv->cfg->base_params->ucode_tracing)
1434 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1435 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IRUSR);
1436 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IRUSR);
1437 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1438 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1439 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1440 &priv->disable_sens_cal);
1441 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1442 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1443 &priv->disable_chain_noise_cal);
1444 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1445 &priv->disable_tx_power_cal);
1446 return 0;
1447
1448err:
1449 IWL_ERR(priv, "Can't create the debugfs directory\n");
1450 iwl_legacy_dbgfs_unregister(priv);
1451 return -ENOMEM;
1452}
1453EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
1454
1455/*
1456 * Remove the debugfs files and directories
1457 *
1458 */
1459void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
1460{
1461 if (!priv->debugfs_dir)
1462 return;
1463
1464 debugfs_remove_recursive(priv->debugfs_dir);
1465 priv->debugfs_dir = NULL;
1466}
1467EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
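/*
 * Typical usage (a sketch; the actual call sites live in the 3945/4965
 * device code, and DRV_NAME here stands in for whatever name the caller
 * passes in):
 *
 *	if (iwl_legacy_dbgfs_register(priv, DRV_NAME))
 *		IWL_ERR(priv, "failed to create debugfs directory\n");
 *
 * with iwl_legacy_dbgfs_unregister(priv) called on teardown.
 */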
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
new file mode 100644
index 000000000000..25718cf9919a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -0,0 +1,1426 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_legacy_dev_h__
33#define __iwl_legacy_dev_h__
34
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <linux/leds.h>
38#include <linux/wait.h>
39#include <net/ieee80211_radiotap.h>
40
41#include "iwl-eeprom.h"
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-debug.h"
46#include "iwl-4965-hw.h"
47#include "iwl-3945-hw.h"
48#include "iwl-led.h"
49#include "iwl-power.h"
50#include "iwl-legacy-rs.h"
51
52struct iwl_tx_queue;
53
54/* CT-KILL constants */
55#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
56
57/* Default noise level to report when noise measurement is not available.
58 * This may be because we're:
59 * 1) Not associated (4965, no beacon statistics being sent to driver)
60 * 2) Scanning (noise measurement does not apply to associated channel)
61 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
62 * Use default noise value of -127 ... this is below the range of measurable
63 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
64 * Also, -127 works better than 0 when averaging frames with/without
65 * noise info (e.g. averaging might be done in app); measured dBm values are
66 * always negative ... using a negative value as the default keeps all
67 * averages within an s8's (used in some apps) range of negative values. */
68#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
69
70/*
71 * RTS threshold here is total size [2347] minus 4 FCS bytes
72 * Per spec:
73 * a value of 0 means RTS on all data/management packets
74 * a value > max MSDU size means no RTS
75 * else RTS for data/management frames where MPDU is larger
76 * than RTS value.
77 */
78#define DEFAULT_RTS_THRESHOLD 2347U
79#define MIN_RTS_THRESHOLD 0U
80#define MAX_RTS_THRESHOLD 2347U
81#define MAX_MSDU_SIZE 2304U
82#define MAX_MPDU_SIZE 2346U
83#define DEFAULT_BEACON_INTERVAL 100U
84#define DEFAULT_SHORT_RETRY_LIMIT 7U
85#define DEFAULT_LONG_RETRY_LIMIT 4U
86
87struct iwl_rx_mem_buffer {
88 dma_addr_t page_dma;
89 struct page *page;
90 struct list_head list;
91};
92
93#define rxb_addr(r) page_address((r)->page)
94
95/* defined below */
96struct iwl_device_cmd;
97
98struct iwl_cmd_meta {
99 /* only for SYNC commands, iff the reply skb is wanted */
100 struct iwl_host_cmd *source;
101 /*
102 * only for ASYNC commands
103 * (which is somewhat stupid -- look at iwl-sta.c for instance
104 * which duplicates a bunch of code because the callback isn't
105 * invoked for SYNC commands, if it were and its result passed
106 * through it would be simpler...)
107 */
108 void (*callback)(struct iwl_priv *priv,
109 struct iwl_device_cmd *cmd,
110 struct iwl_rx_packet *pkt);
111
112 /* The CMD_SIZE_HUGE flag bit indicates that the command
113 * structure is stored at the end of the shared queue memory. */
114 u32 flags;
115
116 DEFINE_DMA_UNMAP_ADDR(mapping);
117 DEFINE_DMA_UNMAP_LEN(len);
118};
119
120/*
121 * Generic queue structure
122 *
123 * Contains common data for Rx and Tx queues
124 */
125struct iwl_queue {
126 int n_bd; /* number of BDs in this queue */
127 int write_ptr; /* 1-st empty entry (index) host_w*/
128 int read_ptr; /* last used entry (index) host_r*/
129 /* use for monitoring and recovering the stuck queue */
130 dma_addr_t dma_addr; /* physical addr for BD's */
131 int n_window; /* safe queue window */
132 u32 id;
133 int low_mark; /* low watermark, resume queue if free
134 * space more than this */
135 int high_mark; /* high watermark, stop queue if free
136 * space less than this */
137} __packed;
138
139/* One for each TFD */
140struct iwl_tx_info {
141 struct sk_buff *skb;
142 struct iwl_rxon_context *ctx;
143};
144
145/**
146 * struct iwl_tx_queue - Tx Queue for DMA
147 * @q: generic Rx/Tx queue descriptor
148 * @tfds: base of circular buffer of TFDs
149 * @cmd: array of command/TX buffer pointers
150 * @meta: array of meta data for each command/tx buffer
151 * @swq_id: software queue id; mac80211 AC in bits 0-1, HW queue in bits 2-6
152 * @txb: array of per-TFD driver data
153 * @time_stamp: time (in jiffies) of last read_ptr change
154 * @need_update: indicates need to update read/write index
155 * @sched_retry: indicates the queue has HT aggregation (HT AGG) enabled
156 *
157 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
158 * descriptors) and required locking structures.
159 */
160#define TFD_TX_CMD_SLOTS 256
161#define TFD_CMD_SLOTS 32
162
163struct iwl_tx_queue {
164 struct iwl_queue q;
165 void *tfds;
166 struct iwl_device_cmd **cmd;
167 struct iwl_cmd_meta *meta;
168 struct iwl_tx_info *txb;
169 unsigned long time_stamp;
170 u8 need_update;
171 u8 sched_retry;
172 u8 active;
173 u8 swq_id;
174};
175
176#define IWL_NUM_SCAN_RATES (2)
177
178struct iwl4965_channel_tgd_info {
179 u8 type;
180 s8 max_power;
181};
182
183struct iwl4965_channel_tgh_info {
184 s64 last_radar_time;
185};
186
187#define IWL4965_MAX_RATE (33)
188
189struct iwl3945_clip_group {
190 /* maximum power level to prevent clipping for each rate, derived by
191 * us from this band's saturation power in EEPROM */
192 const s8 clip_powers[IWL_MAX_RATES];
193};
194
195/* current Tx power values to use, one for each rate for each channel.
196 * requested power is limited by:
197 * -- regulatory EEPROM limits for this channel
198 * -- hardware capabilities (clip-powers)
199 * -- spectrum management
200 * -- user preference (e.g. iwconfig)
201 * when requested power is set, base power index must also be set. */
202struct iwl3945_channel_power_info {
203 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
204 s8 power_table_index; /* actual (compensated) index into gain table */
205 s8 base_power_index; /* gain index for power at factory temp. */
206 s8 requested_power; /* power (dBm) requested for this chnl/rate */
207};
208
209/* current scan Tx power values to use, one for each scan rate for each
210 * channel. */
211struct iwl3945_scan_power_info {
212 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
213 s8 power_table_index; /* actual (compensated) index into gain table */
214 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
215};
216
217/*
218 * One for each channel, holds all channel setup data
219 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
220 * with one another!
221 */
222struct iwl_channel_info {
223 struct iwl4965_channel_tgd_info tgd;
224 struct iwl4965_channel_tgh_info tgh;
225 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
226 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
227 * HT40 channel */
228
229 u8 channel; /* channel number */
230 u8 flags; /* flags copied from EEPROM */
231 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
232 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
233 s8 min_power; /* always 0 */
234 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
235
236 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
237 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
238 enum ieee80211_band band;
239
240 /* HT40 channel info */
241 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
242 u8 ht40_flags; /* flags copied from EEPROM */
243 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
244
245 /* Radio/DSP gain settings for each "normal" data Tx rate.
246 * These include, in addition to RF and DSP gain, a few fields for
247 * remembering/modifying gain settings (indexes). */
248 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
249
250 /* Radio/DSP gain settings for each scan rate, for directed scans. */
251 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
252};
253
254#define IWL_TX_FIFO_BK 0 /* shared */
255#define IWL_TX_FIFO_BE 1
256#define IWL_TX_FIFO_VI 2 /* shared */
257#define IWL_TX_FIFO_VO 3
258#define IWL_TX_FIFO_UNUSED -1
259
260/* Minimum number of queues. MAX_NUM is defined in hw specific files.
261 * Set the minimum to accommodate the 4 standard TX queues, 1 command
262 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
263#define IWL_MIN_NUM_QUEUES 10
264
265#define IWL_DEFAULT_CMD_QUEUE_NUM 4
266
267#define IEEE80211_DATA_LEN 2304
268#define IEEE80211_4ADDR_LEN 30
269#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
270#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
271
272struct iwl_frame {
273 union {
274 struct ieee80211_hdr frame;
275 struct iwl_tx_beacon_cmd beacon;
276 u8 raw[IEEE80211_FRAME_LEN];
277 u8 cmd[360];
278 } u;
279 struct list_head list;
280};
281
282#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
283#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
284#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
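/*
 * Worked example: with IEEE80211_SCTL_SEQ == 0xFFF0, a sequence-control
 * value of 0x0154 (sequence number 21, fragment 4) gives
 * SEQ_TO_SN(0x0154) == 0x15, and SN_TO_SEQ(0x15) == 0x0150, i.e. the
 * fragment bits are dropped; MAX_SN is therefore 0xFFF (4095).
 */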
285
286enum {
287 CMD_SYNC = 0,
288 CMD_SIZE_NORMAL = 0,
289 CMD_NO_SKB = 0,
290 CMD_SIZE_HUGE = (1 << 0),
291 CMD_ASYNC = (1 << 1),
292 CMD_WANT_SKB = (1 << 2),
293};
294
295#define DEF_CMD_PAYLOAD_SIZE 320
296
297/**
298 * struct iwl_device_cmd
299 *
300 * For allocation of the command and tx queues, this establishes the overall
301 * size of the largest command we send to uCode, except for a scan command
302 * (which is relatively huge; space is allocated separately).
303 */
304struct iwl_device_cmd {
305 struct iwl_cmd_header hdr; /* uCode API */
306 union {
307 u32 flags;
308 u8 val8;
309 u16 val16;
310 u32 val32;
311 struct iwl_tx_cmd tx;
312 u8 payload[DEF_CMD_PAYLOAD_SIZE];
313 } __packed cmd;
314} __packed;
315
316#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
317
318
319struct iwl_host_cmd {
320 const void *data;
321 unsigned long reply_page;
322 void (*callback)(struct iwl_priv *priv,
323 struct iwl_device_cmd *cmd,
324 struct iwl_rx_packet *pkt);
325 u32 flags;
326 u16 len;
327 u8 id;
328};
329
330#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
331#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
332#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
333
334/**
335 * struct iwl_rx_queue - Rx queue
336 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
337 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
338 * @read: Shared index to newest available Rx buffer
339 * @write: Shared index to oldest written Rx packet
340 * @free_count: Number of pre-allocated buffers in rx_free
341 * @rx_free: list of free SKBs for use
342 * @rx_used: List of Rx buffers with no SKB
343 * @need_update: flag to indicate we need to update read/write index
344 * @rb_stts: driver's pointer to receive buffer status
345 * @rb_stts_dma: bus address of receive buffer status
346 *
347 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
348 */
349struct iwl_rx_queue {
350 __le32 *bd;
351 dma_addr_t bd_dma;
352 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
353 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
354 u32 read;
355 u32 write;
356 u32 free_count;
357 u32 write_actual;
358 struct list_head rx_free;
359 struct list_head rx_used;
360 int need_update;
361 struct iwl_rb_status *rb_stts;
362 dma_addr_t rb_stts_dma;
363 spinlock_t lock;
364};
365
366#define IWL_SUPPORTED_RATES_IE_LEN 8
367
368#define MAX_TID_COUNT 9
369
370#define IWL_INVALID_RATE 0xFF
371#define IWL_INVALID_VALUE -1
372
373/**
374 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
375 * @txq_id: Tx queue used for Tx attempt
376 * @frame_count: # frames attempted by Tx command
377 * @wait_for_ba: Expect block-ack before next Tx reply
378 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
379 * @bitmap: one bit for each frame pending ACK in the Tx window
380 * (a single u64 covering the whole aggregation window)
381 * @rate_n_flags: Rate at which Tx was attempted
382 *
383 * If REPLY_TX indicates that aggregation was attempted, driver must wait
384 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
385 * until block ack arrives.
386 */
387struct iwl_ht_agg {
388 u16 txq_id;
389 u16 frame_count;
390 u16 wait_for_ba;
391 u16 start_idx;
392 u64 bitmap;
393 u32 rate_n_flags;
394#define IWL_AGG_OFF 0
395#define IWL_AGG_ON 1
396#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
397#define IWL_EMPTYING_HW_QUEUE_DELBA 3
398 u8 state;
399};
400
401
402struct iwl_tid_data {
403 u16 seq_number; /* 4965 only */
404 u16 tfds_in_queue;
405 struct iwl_ht_agg agg;
406};
407
408struct iwl_hw_key {
409 u32 cipher;
410 int keylen;
411 u8 keyidx;
412 u8 key[32];
413};
414
415union iwl_ht_rate_supp {
416 u16 rates;
417 struct {
418 u8 siso_rate;
419 u8 mimo_rate;
420 };
421};
422
423#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
424#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
425#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
426#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
427#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
428#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
429#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
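/*
 * These factors follow the 802.11n "Maximum A-MPDU Length Exponent"
 * convention: the receive A-MPDU size limit is 2^(13 + factor) - 1 bytes,
 * so e.g. CFG_HT_RX_AMPDU_FACTOR_8K (0x0) corresponds to 8191 bytes and
 * CFG_HT_RX_AMPDU_FACTOR_64K (0x3) to 65535 bytes.
 */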
430
431/*
432 * Maximal MPDU density for TX aggregation
433 * 4 - 2us density
434 * 5 - 4us density
435 * 6 - 8us density
436 * 7 - 16us density
437 */
438#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
439#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
440#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
441#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
442#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
443#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
444#define CFG_HT_MPDU_DENSITY_MIN (0x1)
445
446struct iwl_ht_config {
447 bool single_chain_sufficient;
448 enum ieee80211_smps_mode smps; /* current smps mode */
449};
450
451/* QoS structures */
452struct iwl_qos_info {
453 int qos_active;
454 struct iwl_qosparam_cmd def_qos_parm;
455};
456
457/*
458 * Structure should be accessed with sta_lock held. When station addition
459 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
460 * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
461 * sta_lock held.
462 */
463struct iwl_station_entry {
464 struct iwl_legacy_addsta_cmd sta;
465 struct iwl_tid_data tid[MAX_TID_COUNT];
466 u8 used, ctxid;
467 struct iwl_hw_key keyinfo;
468 struct iwl_link_quality_cmd *lq;
469};
470
471struct iwl_station_priv_common {
472 struct iwl_rxon_context *ctx;
473 u8 sta_id;
474};
475
476/*
477 * iwl_station_priv: Driver's private station information
478 *
479 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
480 * in the structure for use by the driver. This structure is placed in that
481 * space.
482 *
483 * The common struct MUST be first because it is shared between
484 * 3945 and 4965!
485 */
486struct iwl_station_priv {
487 struct iwl_station_priv_common common;
488 struct iwl_lq_sta lq_sta;
489 atomic_t pending_frames;
490 bool client;
491 bool asleep;
492};
493
494/**
495 * struct iwl_vif_priv - driver's private per-interface information
496 *
497 * When mac80211 allocates a virtual interface, it can allocate
498 * space for us to put data into.
499 */
500struct iwl_vif_priv {
501 struct iwl_rxon_context *ctx;
502 u8 ibss_bssid_sta_id;
503};
504
505/* one for each uCode image (inst/data, boot/init/runtime) */
506struct fw_desc {
507 void *v_addr; /* access by driver */
508 dma_addr_t p_addr; /* access by card's busmaster DMA */
509 u32 len; /* bytes */
510};
511
512/* uCode file layout */
513struct iwl_ucode_header {
514 __le32 ver; /* major/minor/API/serial */
515 struct {
516 __le32 inst_size; /* bytes of runtime code */
517 __le32 data_size; /* bytes of runtime data */
518 __le32 init_size; /* bytes of init code */
519 __le32 init_data_size; /* bytes of init data */
520 __le32 boot_size; /* bytes of bootstrap code */
521 u8 data[0]; /* in same order as sizes */
522 } v1;
523};
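/*
 * The images in data[] follow the size fields in order; as a worked
 * example (assuming a v1 header): the runtime instruction image starts at
 * data[0], runtime data at data[inst_size], init code at
 * data[inst_size + data_size], init data after that, and the bootstrap
 * image last, at data[inst_size + data_size + init_size + init_data_size].
 */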
524
525struct iwl4965_ibss_seq {
526 u8 mac[ETH_ALEN];
527 u16 seq_num;
528 u16 frag_num;
529 unsigned long packet_time;
530 struct list_head list;
531};
532
533struct iwl_sensitivity_ranges {
534 u16 min_nrg_cck;
535 u16 max_nrg_cck;
536
537 u16 nrg_th_cck;
538 u16 nrg_th_ofdm;
539
540 u16 auto_corr_min_ofdm;
541 u16 auto_corr_min_ofdm_mrc;
542 u16 auto_corr_min_ofdm_x1;
543 u16 auto_corr_min_ofdm_mrc_x1;
544
545 u16 auto_corr_max_ofdm;
546 u16 auto_corr_max_ofdm_mrc;
547 u16 auto_corr_max_ofdm_x1;
548 u16 auto_corr_max_ofdm_mrc_x1;
549
550 u16 auto_corr_max_cck;
551 u16 auto_corr_max_cck_mrc;
552 u16 auto_corr_min_cck;
553 u16 auto_corr_min_cck_mrc;
554
555 u16 barker_corr_th_min;
556 u16 barker_corr_th_min_mrc;
557 u16 nrg_th_cca;
558};
559
560
561#define KELVIN_TO_CELSIUS(x) ((x)-273)
562#define CELSIUS_TO_KELVIN(x) ((x)+273)
563
564
565/**
566 * struct iwl_hw_params
567 * @max_txq_num: Max # Tx queues supported
568 * @dma_chnl_num: Number of Tx DMA/FIFO channels
569 * @scd_bc_tbls_size: size of scheduler byte count tables
570 * @tfd_size: TFD size
571 * @tx/rx_chains_num: Number of TX/RX chains
572 * @valid_tx/rx_ant: usable antennas
573 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
574 * @max_rxq_log: Log-base-2 of max_rxq_size
575 * @rx_page_order: Rx buffer page order
576 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
577 * @max_stations: maximum number of stations supported
578 * @ht40_channel: bitmap of bands where 40MHz channels are possible:
579 * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
580 * @max_beacon_itrvl: maximum beacon interval (in 1024 ms units)
581 * @max_inst/data/bsm_size: maximum uCode instruction/data/BSM image sizes
582 * @ct_kill_threshold: temperature threshold (in hw-dependent units)
583 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
584 * @sens: pointer to this device's struct iwl_sensitivity_ranges
585 */
586struct iwl_hw_params {
587 u8 max_txq_num;
588 u8 dma_chnl_num;
589 u16 scd_bc_tbls_size;
590 u32 tfd_size;
591 u8 tx_chains_num;
592 u8 rx_chains_num;
593 u8 valid_tx_ant;
594 u8 valid_rx_ant;
595 u16 max_rxq_size;
596 u16 max_rxq_log;
597 u32 rx_page_order;
598 u32 rx_wrt_ptr_reg;
599 u8 max_stations;
600 u8 ht40_channel;
601 u8 max_beacon_itrvl; /* in 1024 ms */
602 u32 max_inst_size;
603 u32 max_data_size;
604 u32 max_bsm_size;
605 u32 ct_kill_threshold; /* value in hw-dependent units */
606 u16 beacon_time_tsf_bits;
607 const struct iwl_sensitivity_ranges *sens;
608};
609
610
611/******************************************************************************
612 *
613 * Functions implemented in core module which are forward declared here
614 * for use by iwl-3945.c and iwl-4965.c
615 *
616 * NOTE: The implementation of these functions is not hardware specific,
617 * which is why they are in the core module files.
618 *
619 * Naming convention --
620 * iwl_ <-- Is part of iwlwifi
621 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
622 * iwl4965_bg_ <-- Called from work queue context
623 * iwl4965_mac_ <-- mac80211 callback
624 *
625 ****************************************************************************/
626extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
627extern const u8 iwl_bcast_addr[ETH_ALEN];
628extern int iwl_legacy_queue_space(const struct iwl_queue *q);
629static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
630{
631 return q->write_ptr >= q->read_ptr ?
632 (i >= q->read_ptr && i < q->write_ptr) :
633 !(i < q->read_ptr && i >= q->write_ptr);
634}
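/*
 * Worked example: with n_bd == 256, read_ptr == 250 and write_ptr == 10
 * (the used region wraps), entries 250..255 and 0..9 are in use, so
 * iwl_legacy_queue_used(q, 252) is true while iwl_legacy_queue_used(q, 100)
 * is false.
 */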
635
636
637static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
638 int is_huge)
639{
640 /*
641 * This is for the init calibration result and scan commands, which
642 * require a buffer larger than TFD_MAX_PAYLOAD_SIZE and therefore use
643 * the big buffer at the end of the command array
644 */
645 if (is_huge)
646 return q->n_window; /* must be power of 2 */
647
648 /* Otherwise, use normal size buffers */
649 return index & (q->n_window - 1);
650}
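/*
 * Example (assuming the usual command-queue sizing): n_window is
 * TFD_CMD_SLOTS (32) for the command queue, so normal commands map to
 * slots (index & 31) while a CMD_SIZE_HUGE command (e.g. a scan command)
 * always uses the single extra slot at index 32 at the end of the array.
 */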
651
652
653struct iwl_dma_ptr {
654 dma_addr_t dma;
655 void *addr;
656 size_t size;
657};
658
659#define IWL_OPERATION_MODE_AUTO 0
660#define IWL_OPERATION_MODE_HT_ONLY 1
661#define IWL_OPERATION_MODE_MIXED 2
662#define IWL_OPERATION_MODE_20MHZ 3
663
664#define IWL_TX_CRC_SIZE 4
665#define IWL_TX_DELIMITER_SIZE 4
666
667#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
668
669/* Sensitivity and chain noise calibration */
670#define INITIALIZATION_VALUE 0xFFFF
671#define IWL4965_CAL_NUM_BEACONS 20
672#define IWL_CAL_NUM_BEACONS 16
673#define MAXIMUM_ALLOWED_PATHLOSS 15
674
675#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
676
677#define MAX_FA_OFDM 50
678#define MIN_FA_OFDM 5
679#define MAX_FA_CCK 50
680#define MIN_FA_CCK 5
681
682#define AUTO_CORR_STEP_OFDM 1
683
684#define AUTO_CORR_STEP_CCK 3
685#define AUTO_CORR_MAX_TH_CCK 160
686
687#define NRG_DIFF 2
688#define NRG_STEP_CCK 2
689#define NRG_MARGIN 8
690#define MAX_NUMBER_CCK_NO_FA 100
691
692#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
693
694#define CHAIN_A 0
695#define CHAIN_B 1
696#define CHAIN_C 2
697#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
698#define ALL_BAND_FILTER 0xFF00
699#define IN_BAND_FILTER 0xFF
700#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
701
702#define NRG_NUM_PREV_STAT_L 20
703#define NUM_RX_CHAINS 3
704
705enum iwl4965_false_alarm_state {
706 IWL_FA_TOO_MANY = 0,
707 IWL_FA_TOO_FEW = 1,
708 IWL_FA_GOOD_RANGE = 2,
709};
710
711enum iwl4965_chain_noise_state {
712 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
713 IWL_CHAIN_NOISE_ACCUMULATE,
714 IWL_CHAIN_NOISE_CALIBRATED,
715 IWL_CHAIN_NOISE_DONE,
716};
717
718enum iwl4965_calib_enabled_state {
719 IWL_CALIB_DISABLED = 0, /* must be 0 */
720 IWL_CALIB_ENABLED = 1,
721};
722
723/*
724 * enum iwl_calib
725 * defines the order in which results of initial calibrations
726 * should be sent to the runtime uCode
727 */
728enum iwl_calib {
729 IWL_CALIB_MAX,
730};
731
732/* Opaque calibration results */
733struct iwl_calib_result {
734 void *buf;
735 size_t buf_len;
736};
737
738enum ucode_type {
739 UCODE_NONE = 0,
740 UCODE_INIT,
741 UCODE_RT
742};
743
744/* Sensitivity calib data */
745struct iwl_sensitivity_data {
746 u32 auto_corr_ofdm;
747 u32 auto_corr_ofdm_mrc;
748 u32 auto_corr_ofdm_x1;
749 u32 auto_corr_ofdm_mrc_x1;
750 u32 auto_corr_cck;
751 u32 auto_corr_cck_mrc;
752
753 u32 last_bad_plcp_cnt_ofdm;
754 u32 last_fa_cnt_ofdm;
755 u32 last_bad_plcp_cnt_cck;
756 u32 last_fa_cnt_cck;
757
758 u32 nrg_curr_state;
759 u32 nrg_prev_state;
760 u32 nrg_value[10];
761 u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
762 u32 nrg_silence_ref;
763 u32 nrg_energy_idx;
764 u32 nrg_silence_idx;
765 u32 nrg_th_cck;
766 s32 nrg_auto_corr_silence_diff;
767 u32 num_in_cck_no_fa;
768 u32 nrg_th_ofdm;
769
770 u16 barker_corr_th_min;
771 u16 barker_corr_th_min_mrc;
772 u16 nrg_th_cca;
773};
774
775/* Chain noise (differential Rx gain) calib data */
776struct iwl_chain_noise_data {
777 u32 active_chains;
778 u32 chain_noise_a;
779 u32 chain_noise_b;
780 u32 chain_noise_c;
781 u32 chain_signal_a;
782 u32 chain_signal_b;
783 u32 chain_signal_c;
784 u16 beacon_count;
785 u8 disconn_array[NUM_RX_CHAINS];
786 u8 delta_gain_code[NUM_RX_CHAINS];
787 u8 radio_write;
788 u8 state;
789};
790
791#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
792#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
793
794#define IWL_TRAFFIC_ENTRIES (256)
795#define IWL_TRAFFIC_ENTRY_SIZE (64)
796
797enum {
798 MEASUREMENT_READY = (1 << 0),
799 MEASUREMENT_ACTIVE = (1 << 1),
800};
801
802/* interrupt statistics */
803struct isr_statistics {
804 u32 hw;
805 u32 sw;
806 u32 err_code;
807 u32 sch;
808 u32 alive;
809 u32 rfkill;
810 u32 ctkill;
811 u32 wakeup;
812 u32 rx;
813 u32 rx_handlers[REPLY_MAX];
814 u32 tx;
815 u32 unhandled;
816};
817
818/* management statistics */
819enum iwl_mgmt_stats {
820 MANAGEMENT_ASSOC_REQ = 0,
821 MANAGEMENT_ASSOC_RESP,
822 MANAGEMENT_REASSOC_REQ,
823 MANAGEMENT_REASSOC_RESP,
824 MANAGEMENT_PROBE_REQ,
825 MANAGEMENT_PROBE_RESP,
826 MANAGEMENT_BEACON,
827 MANAGEMENT_ATIM,
828 MANAGEMENT_DISASSOC,
829 MANAGEMENT_AUTH,
830 MANAGEMENT_DEAUTH,
831 MANAGEMENT_ACTION,
832 MANAGEMENT_MAX,
833};
834/* control statistics */
835enum iwl_ctrl_stats {
836 CONTROL_BACK_REQ = 0,
837 CONTROL_BACK,
838 CONTROL_PSPOLL,
839 CONTROL_RTS,
840 CONTROL_CTS,
841 CONTROL_ACK,
842 CONTROL_CFEND,
843 CONTROL_CFENDACK,
844 CONTROL_MAX,
845};
846
847struct traffic_stats {
848#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
849 u32 mgmt[MANAGEMENT_MAX];
850 u32 ctrl[CONTROL_MAX];
851 u32 data_cnt;
852 u64 data_bytes;
853#endif
854};
855
856/*
857 * iwl_switch_rxon: "channel switch" structure
858 *
859 * @switch_in_progress: channel switch in progress
860 * @channel: new channel
861 */
862struct iwl_switch_rxon {
863 bool switch_in_progress;
864 __le16 channel;
865};
866
867/*
868 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
869 * to perform continuous uCode event logging, if enabled
870 */
871#define UCODE_TRACE_PERIOD (100)
872
873/*
874 * iwl_event_log: current uCode event log position
875 *
876 * @ucode_trace: enable/disable ucode continuous trace timer
877 * @num_wraps: how many times the event buffer wraps
878 * @next_entry: the entry just before the next one that uCode would fill
879 * @non_wraps_count: counter of dumps where no wrap was detected
880 * @wraps_once_count: counter of dumps where exactly one wrap was detected
881 * @wraps_more_count: counter of dumps where more than one wrap
882 *	was detected
883 */
884struct iwl_event_log {
885 bool ucode_trace;
886 u32 num_wraps;
887 u32 next_entry;
888 int non_wraps_count;
889 int wraps_once_count;
890 int wraps_more_count;
891};
892
893/*
894 * host interrupt timeout value
895 * used when setting the interrupt coalescing timer
896 * (the CSR_INT_COALESCING register is 8 bits wide, in units of 32 usecs)
897 *
898 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
899 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
900 */
901#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
902#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
903#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
904#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
905#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
906#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
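A hedged worked example of the unit conversion described in the comment above (illustrative sketch only; the helper name is hypothetical and not part of the driver):

/* Sketch: translate an 8-bit CSR_INT_COALESCING value into microseconds. */
static inline unsigned int example_coalesce_to_usecs(unsigned char reg_val)
{
	return (unsigned int)reg_val * 32;	/* 0x40 -> 2048, 0x10 -> 512 */
}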
907
908/*
909 * This is the threshold value for the plcp error rate per 100 msecs. It is
910 * used to set plcp_delta and to check its validity.
911 */
912#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
913#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
914#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
915#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
916#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
917#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
918
919#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
920#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
921
922/* TX queue watchdog timeouts in mSecs */
923#define IWL_DEF_WD_TIMEOUT (2000)
924#define IWL_LONG_WD_TIMEOUT (10000)
925#define IWL_MAX_WD_TIMEOUT (120000)
926
927enum iwl_reset {
928 IWL_RF_RESET = 0,
929 IWL_FW_RESET,
930 IWL_MAX_FORCE_RESET,
931};
932
933struct iwl_force_reset {
934 int reset_request_count;
935 int reset_success_count;
936 int reset_reject_count;
937 unsigned long reset_duration;
938 unsigned long last_force_reset_jiffies;
939};
940
941/* extend beacon time format bit shifting */
942/*
943 * for _3945 devices
944 * bits 31:24 - extended
945 * bits 23:0 - interval
946 */
947#define IWL3945_EXT_BEACON_TIME_POS 24
948/*
949 * for _4965 devices
950 * bits 31:22 - extended
951 * bits 21:0 - interval
952 */
953#define IWL4965_EXT_BEACON_TIME_POS 22
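To make the bit layout above concrete, here is a minimal hedged sketch (hypothetical helper, not the driver's own beacon-time code) that packs an extended/interval pair using the 4965 position; a 3945 variant would use IWL3945_EXT_BEACON_TIME_POS instead:

/* Sketch: compose a 4965 beacon time word (bits 31:22 extended, 21:0 interval). */
static inline unsigned int example_4965_ext_beacon_time(unsigned int extended,
							unsigned int interval)
{
	return (extended << IWL4965_EXT_BEACON_TIME_POS) |
	       (interval & ((1U << IWL4965_EXT_BEACON_TIME_POS) - 1));
}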
954
955enum iwl_rxon_context_id {
956 IWL_RXON_CTX_BSS,
957
958 NUM_IWL_RXON_CTX
959};
960
961struct iwl_rxon_context {
962 struct ieee80211_vif *vif;
963
964 const u8 *ac_to_fifo;
965 const u8 *ac_to_queue;
966 u8 mcast_queue;
967
968 /*
969	 * We could use the vif to indicate an active context, but the
970	 * context also needs to stay active while it is being disabled,
971	 * after the vif has already been removed for type setting.
972 */
973 bool always_active, is_active;
974
975 bool ht_need_multiple_chains;
976
977 enum iwl_rxon_context_id ctxid;
978
979 u32 interface_modes, exclusive_interface_modes;
980 u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
981
982 /*
983 * We declare this const so it can only be
984 * changed via explicit cast within the
985 * routines that actually update the physical
986 * hardware.
987 */
988 const struct iwl_legacy_rxon_cmd active;
989 struct iwl_legacy_rxon_cmd staging;
990
991 struct iwl_rxon_time_cmd timing;
992
993 struct iwl_qos_info qos_data;
994
995 u8 bcast_sta_id, ap_sta_id;
996
997 u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
998 u8 qos_cmd;
999 u8 wep_key_cmd;
1000
1001 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1002 u8 key_mapping_keys;
1003
1004 __le32 station_flags;
1005
1006 struct {
1007 bool non_gf_sta_present;
1008 u8 protection;
1009 bool enabled, is_40mhz;
1010 u8 extension_chan_offset;
1011 } ht;
1012};
1013
1014struct iwl_priv {
1015
1016 /* ieee device used by generic ieee processing code */
1017 struct ieee80211_hw *hw;
1018 struct ieee80211_channel *ieee_channels;
1019 struct ieee80211_rate *ieee_rates;
1020 struct iwl_cfg *cfg;
1021
1022 /* temporary frame storage list */
1023 struct list_head free_frames;
1024 int frames_count;
1025
1026 enum ieee80211_band band;
1027 int alloc_rxb_page;
1028
1029 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
1030 struct iwl_rx_mem_buffer *rxb);
1031
1032 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
1033
1034 /* spectrum measurement report caching */
1035 struct iwl_spectrum_notification measure_report;
1036 u8 measurement_status;
1037
1038 /* ucode beacon time */
1039 u32 ucode_beacon_time;
1040 int missed_beacon_threshold;
1041
1042 /* track IBSS manager (last beacon) status */
1043 u32 ibss_manager;
1044
1045 /* storing the jiffies when the plcp error rate is received */
1046 unsigned long plcp_jiffies;
1047
1048 /* force reset */
1049 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1050
1051 /* we allocate array of iwl_channel_info for NIC's valid channels.
1052 * Access via channel # using indirect index array */
1053 struct iwl_channel_info *channel_info; /* channel info array */
1054 u8 channel_count; /* # of channels */
1055
1056 /* thermal calibration */
1057 s32 temperature; /* degrees Kelvin */
1058 s32 last_temperature;
1059
1060 /* init calibration results */
1061 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1062
1063 /* Scan related variables */
1064 unsigned long scan_start;
1065 unsigned long scan_start_tsf;
1066 void *scan_cmd;
1067 enum ieee80211_band scan_band;
1068 struct cfg80211_scan_request *scan_request;
1069 struct ieee80211_vif *scan_vif;
1070 bool is_internal_short_scan;
1071 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1072 u8 mgmt_tx_ant;
1073
1074 /* spinlock */
1075 spinlock_t lock; /* protect general shared data */
1076 spinlock_t hcmd_lock; /* protect hcmd */
1077 spinlock_t reg_lock; /* protect hw register access */
1078 struct mutex mutex;
1079 struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
1080
1081 /* basic pci-network driver stuff */
1082 struct pci_dev *pci_dev;
1083
1084 /* pci hardware address support */
1085 void __iomem *hw_base;
1086 u32 hw_rev;
1087 u32 hw_wa_rev;
1088 u8 rev_id;
1089
1090 /* microcode/device supports multiple contexts */
1091 u8 valid_contexts;
1092
1093 /* command queue number */
1094 u8 cmd_queue;
1095
1096 /* max number of station keys */
1097 u8 sta_key_max_num;
1098
1099 /* EEPROM MAC addresses */
1100 struct mac_address addresses[1];
1101
1102 /* uCode images, save to reload in case of failure */
1103 int fw_index; /* firmware we're trying to load */
1104 u32 ucode_ver; /* version of ucode, copy of
1105 iwl_ucode.ver */
1106 struct fw_desc ucode_code; /* runtime inst */
1107 struct fw_desc ucode_data; /* runtime data original */
1108 struct fw_desc ucode_data_backup; /* runtime data save/restore */
1109 struct fw_desc ucode_init; /* initialization inst */
1110 struct fw_desc ucode_init_data; /* initialization data */
1111 struct fw_desc ucode_boot; /* bootstrap inst */
1112 enum ucode_type ucode_type;
1113 u8 ucode_write_complete; /* the image write is complete */
1114 char firmware_name[25];
1115
1116 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1117
1118 struct iwl_switch_rxon switch_rxon;
1119
1120 /* 1st responses from initialize and runtime uCode images.
1121 * _4965's initialize alive response contains some calibration data. */
1122 struct iwl_init_alive_resp card_alive_init;
1123 struct iwl_alive_resp card_alive;
1124
1125 u16 active_rate;
1126
1127 u8 start_calib;
1128 struct iwl_sensitivity_data sensitivity_data;
1129 struct iwl_chain_noise_data chain_noise_data;
1130 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1131
1132 struct iwl_ht_config current_ht_config;
1133
1134 /* Rate scaling data */
1135 u8 retry_rate;
1136
1137 wait_queue_head_t wait_command_queue;
1138
1139 int activity_timer_active;
1140
1141 /* Rx and Tx DMA processing queues */
1142 struct iwl_rx_queue rxq;
1143 struct iwl_tx_queue *txq;
1144 unsigned long txq_ctx_active_msk;
1145 struct iwl_dma_ptr kw; /* keep warm address */
1146 struct iwl_dma_ptr scd_bc_tbls;
1147
1148 u32 scd_base_addr; /* scheduler sram base address */
1149
1150 unsigned long status;
1151
1152 /* counts mgmt, ctl, and data packets */
1153 struct traffic_stats tx_stats;
1154 struct traffic_stats rx_stats;
1155
1156 /* counts interrupts */
1157 struct isr_statistics isr_stats;
1158
1159 struct iwl_power_mgr power_data;
1160
1161 /* context information */
1162 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1163
1164 /* station table variables */
1165
1166 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1167 spinlock_t sta_lock;
1168 int num_stations;
1169 struct iwl_station_entry stations[IWL_STATION_COUNT];
1170 unsigned long ucode_key_table;
1171
1172 /* queue refcounts */
1173#define IWL_MAX_HW_QUEUES 32
1174 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1175 /* for each AC */
1176 atomic_t queue_stop_count[4];
1177
1178 /* Indication if ieee80211_ops->open has been called */
1179 u8 is_open;
1180
1181 u8 mac80211_registered;
1182
1183 /* eeprom -- this is in the card's little endian byte order */
1184 u8 *eeprom;
1185 struct iwl_eeprom_calib_info *calib_info;
1186
1187 enum nl80211_iftype iw_mode;
1188
1189 /* Last Rx'd beacon timestamp */
1190 u64 timestamp;
1191
1192 union {
1193#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
1194 struct {
1195 void *shared_virt;
1196 dma_addr_t shared_phys;
1197
1198 struct delayed_work thermal_periodic;
1199 struct delayed_work rfkill_poll;
1200
1201 struct iwl3945_notif_statistics statistics;
1202#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1203 struct iwl3945_notif_statistics accum_statistics;
1204 struct iwl3945_notif_statistics delta_statistics;
1205 struct iwl3945_notif_statistics max_delta;
1206#endif
1207
1208 u32 sta_supp_rates;
1209 int last_rx_rssi; /* From Rx packet statistics */
1210
1211 /* Rx'd packet timing information */
1212 u32 last_beacon_time;
1213 u64 last_tsf;
1214
1215 /*
1216 * each calibration channel group in the
1217 * EEPROM has a derived clip setting for
1218 * each rate.
1219 */
1220 const struct iwl3945_clip_group clip_groups[5];
1221
1222 } _3945;
1223#endif
1224#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
1225 struct {
1226 /*
1227		 * number of TIDs that currently have aggregation (AGG)
1228		 * enabled; 0 means no aggregation is active
1229 */
1230 u8 agg_tids_count;
1231
1232 struct iwl_rx_phy_res last_phy_res;
1233 bool last_phy_res_valid;
1234
1235 struct completion firmware_loading_complete;
1236
1237 /*
1238		 * the chain noise reset and gain commands are two
1239		 * extra calibration commands that follow the standard
1240		 * phy calibration commands
1241 */
1242 u8 phy_calib_chain_noise_reset_cmd;
1243 u8 phy_calib_chain_noise_gain_cmd;
1244
1245 struct iwl_notif_statistics statistics;
1246#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1247 struct iwl_notif_statistics accum_statistics;
1248 struct iwl_notif_statistics delta_statistics;
1249 struct iwl_notif_statistics max_delta;
1250#endif
1251
1252 } _4965;
1253#endif
1254 };
1255
1256 struct iwl_hw_params hw_params;
1257
1258 u32 inta_mask;
1259
1260 struct workqueue_struct *workqueue;
1261
1262 struct work_struct restart;
1263 struct work_struct scan_completed;
1264 struct work_struct rx_replenish;
1265 struct work_struct abort_scan;
1266
1267 struct iwl_rxon_context *beacon_ctx;
1268 struct sk_buff *beacon_skb;
1269
1270 struct work_struct start_internal_scan;
1271 struct work_struct tx_flush;
1272
1273 struct tasklet_struct irq_tasklet;
1274
1275 struct delayed_work init_alive_start;
1276 struct delayed_work alive_start;
1277 struct delayed_work scan_check;
1278
1279 /* TX Power */
1280 s8 tx_power_user_lmt;
1281 s8 tx_power_device_lmt;
1282 s8 tx_power_next;
1283
1284
1285#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1286 /* debugging info */
1287 u32 debug_level; /* per device debugging will override global
1288 iwl_debug_level if set */
1289#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1290#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1291 /* debugfs */
1292 u16 tx_traffic_idx;
1293 u16 rx_traffic_idx;
1294 u8 *tx_traffic;
1295 u8 *rx_traffic;
1296 struct dentry *debugfs_dir;
1297 u32 dbgfs_sram_offset, dbgfs_sram_len;
1298 bool disable_ht40;
1299#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
1300
1301 struct work_struct txpower_work;
1302 u32 disable_sens_cal;
1303 u32 disable_chain_noise_cal;
1304 u32 disable_tx_power_cal;
1305 struct work_struct run_time_calib_work;
1306 struct timer_list statistics_periodic;
1307 struct timer_list ucode_trace;
1308 struct timer_list watchdog;
1309 bool hw_ready;
1310
1311 struct iwl_event_log event_log;
1312
1313 struct led_classdev led;
1314 unsigned long blink_on, blink_off;
1315 bool led_registered;
1316}; /*iwl_priv */
1317
1318static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1319{
1320 set_bit(txq_id, &priv->txq_ctx_active_msk);
1321}
1322
1323static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1324{
1325 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1326}
1327
1328#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1329/*
1330 * iwl_legacy_get_debug_level: Return active debug level for device
1331 *
1332 * Using sysfs it is possible to set per device debug level. This debug
1333 * level will be used if set, otherwise the global debug level which can be
1334 * set via module parameter is used.
1335 */
1336static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1337{
1338 if (priv->debug_level)
1339 return priv->debug_level;
1340 else
1341 return iwl_debug_level;
1342}
1343#else
1344static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1345{
1346 return iwl_debug_level;
1347}
1348#endif
1349
1350
1351static inline struct ieee80211_hdr *
1352iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
1353 int txq_id, int idx)
1354{
1355 if (priv->txq[txq_id].txb[idx].skb)
1356 return (struct ieee80211_hdr *)priv->txq[txq_id].
1357 txb[idx].skb->data;
1358 return NULL;
1359}
1360
1361static inline struct iwl_rxon_context *
1362iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1363{
1364 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1365
1366 return vif_priv->ctx;
1367}
1368
1369#define for_each_context(priv, ctx) \
1370 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
1371 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
1372 if (priv->valid_contexts & BIT(ctx->ctxid))
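A brief usage sketch of the for_each_context() iterator defined above (illustrative only; the counting helper below is hypothetical):

/* Sketch: count the currently active RXON contexts of a device. */
static inline int example_count_active_contexts(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int n = 0;

	for_each_context(priv, ctx)
		if (ctx->is_active)
			n++;
	return n;
}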
1373
1374static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
1375 enum iwl_rxon_context_id ctxid)
1376{
1377 return (priv->contexts[ctxid].active.filter_flags &
1378 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1379}
1380
1381static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
1382{
1383 return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
1384}
1385
1386static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
1387{
1388 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1389}
1390
1391static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
1392{
1393 if (ch_info == NULL)
1394 return 0;
1395 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1396}
1397
1398static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
1399{
1400 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1401}
1402
1403static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
1404{
1405 return ch_info->band == IEEE80211_BAND_5GHZ;
1406}
1407
1408static inline int
1409iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1410{
1411 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1412}
1413
1414static inline void
1415__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
1416{
1417 __free_pages(page, priv->hw_params.rx_page_order);
1418 priv->alloc_rxb_page--;
1419}
1420
1421static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
1422{
1423 free_pages(page, priv->hw_params.rx_page_order);
1424 priv->alloc_rxb_page--;
1425}
1426#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644
index 000000000000..080b852b33bd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
@@ -0,0 +1,45 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28
29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__
31#include "iwl-dev.h"
32
33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h"
35
36EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event);
44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event);
45#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
new file mode 100644
index 000000000000..9612aa0f6ec4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
@@ -0,0 +1,270 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_LEGACY_DEVICE_TRACE
29
30#include <linux/tracepoint.h>
31
32#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
33#undef TRACE_EVENT
34#define TRACE_EVENT(name, proto, ...) \
35static inline void trace_ ## name(proto) {}
36#endif
37
38
39#define PRIV_ENTRY __field(struct iwl_priv *, priv)
40#define PRIV_ASSIGN (__entry->priv = priv)
41
42#undef TRACE_SYSTEM
43#define TRACE_SYSTEM iwlwifi_legacy_io
44
45TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
46 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
47 TP_ARGS(priv, offs, val),
48 TP_STRUCT__entry(
49 PRIV_ENTRY
50 __field(u32, offs)
51 __field(u32, val)
52 ),
53 TP_fast_assign(
54 PRIV_ASSIGN;
55 __entry->offs = offs;
56 __entry->val = val;
57 ),
58 TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
59 __entry->offs, __entry->val)
60);
61
62TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
63 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
64 TP_ARGS(priv, offs, val),
65 TP_STRUCT__entry(
66 PRIV_ENTRY
67 __field(u32, offs)
68 __field(u8, val)
69 ),
70 TP_fast_assign(
71 PRIV_ASSIGN;
72 __entry->offs = offs;
73 __entry->val = val;
74 ),
75	TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
76 __entry->offs, __entry->val)
77);
78
79TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
80 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
81 TP_ARGS(priv, offs, val),
82 TP_STRUCT__entry(
83 PRIV_ENTRY
84 __field(u32, offs)
85 __field(u32, val)
86 ),
87 TP_fast_assign(
88 PRIV_ASSIGN;
89 __entry->offs = offs;
90 __entry->val = val;
91 ),
92	TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
93 __entry->offs, __entry->val)
94);
95
96#undef TRACE_SYSTEM
97#define TRACE_SYSTEM iwlwifi_legacy_ucode
98
99TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event,
100 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
101 TP_ARGS(priv, time, data, ev),
102 TP_STRUCT__entry(
103 PRIV_ENTRY
104
105 __field(u32, time)
106 __field(u32, data)
107 __field(u32, ev)
108 ),
109 TP_fast_assign(
110 PRIV_ASSIGN;
111 __entry->time = time;
112 __entry->data = data;
113 __entry->ev = ev;
114 ),
115 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
116 __entry->priv, __entry->time, __entry->data, __entry->ev)
117);
118
119TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event,
120 TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
121 TP_ARGS(priv, wraps, n_entry, p_entry),
122 TP_STRUCT__entry(
123 PRIV_ENTRY
124
125 __field(u32, wraps)
126 __field(u32, n_entry)
127 __field(u32, p_entry)
128 ),
129 TP_fast_assign(
130 PRIV_ASSIGN;
131 __entry->wraps = wraps;
132 __entry->n_entry = n_entry;
133 __entry->p_entry = p_entry;
134 ),
135 TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
136 __entry->priv, __entry->wraps, __entry->n_entry,
137 __entry->p_entry)
138);
139
140#undef TRACE_SYSTEM
141#define TRACE_SYSTEM iwlwifi
142
143TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
144 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
145 TP_ARGS(priv, hcmd, len, flags),
146 TP_STRUCT__entry(
147 PRIV_ENTRY
148 __dynamic_array(u8, hcmd, len)
149 __field(u32, flags)
150 ),
151 TP_fast_assign(
152 PRIV_ASSIGN;
153 memcpy(__get_dynamic_array(hcmd), hcmd, len);
154 __entry->flags = flags;
155 ),
156 TP_printk("[%p] hcmd %#.2x (%ssync)",
157 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
158 __entry->flags & CMD_ASYNC ? "a" : "")
159);
160
161TRACE_EVENT(iwlwifi_legacy_dev_rx,
162 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
163 TP_ARGS(priv, rxbuf, len),
164 TP_STRUCT__entry(
165 PRIV_ENTRY
166 __dynamic_array(u8, rxbuf, len)
167 ),
168 TP_fast_assign(
169 PRIV_ASSIGN;
170 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
171 ),
172 TP_printk("[%p] RX cmd %#.2x",
173 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
174);
175
176TRACE_EVENT(iwlwifi_legacy_dev_tx,
177 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
178 void *buf0, size_t buf0_len,
179 void *buf1, size_t buf1_len),
180 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
181 TP_STRUCT__entry(
182 PRIV_ENTRY
183
184 __field(size_t, framelen)
185 __dynamic_array(u8, tfd, tfdlen)
186
187 /*
188 * Do not insert between or below these items,
189 * we want to keep the frame together (except
190 * for the possible padding).
191 */
192 __dynamic_array(u8, buf0, buf0_len)
193 __dynamic_array(u8, buf1, buf1_len)
194 ),
195 TP_fast_assign(
196 PRIV_ASSIGN;
197 __entry->framelen = buf0_len + buf1_len;
198 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
199 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
200 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
201 ),
202 TP_printk("[%p] TX %.2x (%zu bytes)",
203 __entry->priv,
204 ((u8 *)__get_dynamic_array(buf0))[0],
205 __entry->framelen)
206);
207
208TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
209 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
210 u32 data1, u32 data2, u32 line, u32 blink1,
211 u32 blink2, u32 ilink1, u32 ilink2),
212 TP_ARGS(priv, desc, time, data1, data2, line,
213 blink1, blink2, ilink1, ilink2),
214 TP_STRUCT__entry(
215 PRIV_ENTRY
216 __field(u32, desc)
217 __field(u32, time)
218 __field(u32, data1)
219 __field(u32, data2)
220 __field(u32, line)
221 __field(u32, blink1)
222 __field(u32, blink2)
223 __field(u32, ilink1)
224 __field(u32, ilink2)
225 ),
226 TP_fast_assign(
227 PRIV_ASSIGN;
228 __entry->desc = desc;
229 __entry->time = time;
230 __entry->data1 = data1;
231 __entry->data2 = data2;
232 __entry->line = line;
233 __entry->blink1 = blink1;
234 __entry->blink2 = blink2;
235 __entry->ilink1 = ilink1;
236 __entry->ilink2 = ilink2;
237 ),
238 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
239 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
240 __entry->priv, __entry->desc, __entry->time, __entry->data1,
241 __entry->data2, __entry->line, __entry->blink1,
242 __entry->blink2, __entry->ilink1, __entry->ilink2)
243);
244
245TRACE_EVENT(iwlwifi_legacy_dev_ucode_event,
246 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
247 TP_ARGS(priv, time, data, ev),
248 TP_STRUCT__entry(
249 PRIV_ENTRY
250
251 __field(u32, time)
252 __field(u32, data)
253 __field(u32, ev)
254 ),
255 TP_fast_assign(
256 PRIV_ASSIGN;
257 __entry->time = time;
258 __entry->data = data;
259 __entry->ev = ev;
260 ),
261 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
262 __entry->priv, __entry->time, __entry->data, __entry->ev)
263);
264#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
265
266#undef TRACE_INCLUDE_PATH
267#define TRACE_INCLUDE_PATH .
268#undef TRACE_INCLUDE_FILE
269#define TRACE_INCLUDE_FILE iwl-devtrace
270#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
new file mode 100644
index 000000000000..39e577323942
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -0,0 +1,561 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwl_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/* 2.4 GHz */
110const u8 iwl_eeprom_band_1[14] = {
111 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
112};
113
114/* 5.2 GHz bands */
115static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
116 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
117};
118
119static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
120 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
121};
122
123static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
124 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
125};
126
127static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
128 145, 149, 153, 157, 161, 165
129};
130
131static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
132 1, 2, 3, 4, 5, 6, 7
133};
134
135static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
146{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
148 int ret = 0;
149
150 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
151 switch (gp) {
152 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
153 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
154 break;
155 default:
156		IWL_ERR(priv, "bad EEPROM signature, "
157			"EEPROM_GP=0x%08x\n", gp);
158 ret = -ENOENT;
159 break;
160 }
161 return ret;
162}
163
164const u8
165*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
166{
167 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
168 return &priv->eeprom[offset];
169}
170EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
171
172u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
173{
174 if (!priv->eeprom)
175 return 0;
176 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
177}
178EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
179
180/**
181 * iwl_legacy_eeprom_init - read EEPROM contents
182 *
183 * Load the EEPROM contents from adapter into priv->eeprom
184 *
185 * NOTE: This routine uses the non-debug IO access functions.
186 */
187int iwl_legacy_eeprom_init(struct iwl_priv *priv)
188{
189 __le16 *e;
190 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
191 int sz;
192 int ret;
193 u16 addr;
194
195 /* allocate eeprom */
196 sz = priv->cfg->base_params->eeprom_size;
197 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
198 priv->eeprom = kzalloc(sz, GFP_KERNEL);
199 if (!priv->eeprom) {
200 ret = -ENOMEM;
201 goto alloc_err;
202 }
203 e = (__le16 *)priv->eeprom;
204
205 priv->cfg->ops->lib->apm_ops.init(priv);
206
207 ret = iwl_legacy_eeprom_verify_signature(priv);
208 if (ret < 0) {
209 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
210 ret = -ENOENT;
211 goto err;
212 }
213
214 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
215 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
216 if (ret < 0) {
217 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
218 ret = -ENOENT;
219 goto err;
220 }
221
222 /* eeprom is an array of 16bit values */
223 for (addr = 0; addr < sz; addr += sizeof(u16)) {
224 u32 r;
225
226 _iwl_legacy_write32(priv, CSR_EEPROM_REG,
227 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
228
229 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
230 CSR_EEPROM_REG_READ_VALID_MSK,
231 CSR_EEPROM_REG_READ_VALID_MSK,
232 IWL_EEPROM_ACCESS_TIMEOUT);
233 if (ret < 0) {
234 IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
235 addr);
236 goto done;
237 }
238 r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
239 e[addr / 2] = cpu_to_le16(r >> 16);
240 }
241
242 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
243 "EEPROM",
244 iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
245
246 ret = 0;
247done:
248 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
249
250err:
251 if (ret)
252 iwl_legacy_eeprom_free(priv);
253 /* Reset chip to save power until we load uCode during "up". */
254 iwl_legacy_apm_stop(priv);
255alloc_err:
256 return ret;
257}
258EXPORT_SYMBOL(iwl_legacy_eeprom_init);
259
260void iwl_legacy_eeprom_free(struct iwl_priv *priv)
261{
262 kfree(priv->eeprom);
263 priv->eeprom = NULL;
264}
265EXPORT_SYMBOL(iwl_legacy_eeprom_free);
266
267static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
268 int eep_band, int *eeprom_ch_count,
269 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const u8 **eeprom_ch_index)
271{
272 u32 offset = priv->cfg->ops->lib->
273 eeprom_ops.regulatory_bands[eep_band - 1];
274 switch (eep_band) {
275 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
277 *eeprom_ch_info = (struct iwl_eeprom_channel *)
278 iwl_legacy_eeprom_query_addr(priv, offset);
279 *eeprom_ch_index = iwl_eeprom_band_1;
280 break;
281 case 2: /* 4.9GHz band */
282 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
283 *eeprom_ch_info = (struct iwl_eeprom_channel *)
284 iwl_legacy_eeprom_query_addr(priv, offset);
285 *eeprom_ch_index = iwl_eeprom_band_2;
286 break;
287 case 3: /* 5.2GHz band */
288 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
289 *eeprom_ch_info = (struct iwl_eeprom_channel *)
290 iwl_legacy_eeprom_query_addr(priv, offset);
291 *eeprom_ch_index = iwl_eeprom_band_3;
292 break;
293 case 4: /* 5.5GHz band */
294 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
295 *eeprom_ch_info = (struct iwl_eeprom_channel *)
296 iwl_legacy_eeprom_query_addr(priv, offset);
297 *eeprom_ch_index = iwl_eeprom_band_4;
298 break;
299 case 5: /* 5.7GHz band */
300 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
301 *eeprom_ch_info = (struct iwl_eeprom_channel *)
302 iwl_legacy_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwl_eeprom_band_5;
304 break;
305 case 6: /* 2.4GHz ht40 channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
307 *eeprom_ch_info = (struct iwl_eeprom_channel *)
308 iwl_legacy_eeprom_query_addr(priv, offset);
309 *eeprom_ch_index = iwl_eeprom_band_6;
310 break;
311 case 7: /* 5 GHz ht40 channels */
312 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
313 *eeprom_ch_info = (struct iwl_eeprom_channel *)
314 iwl_legacy_eeprom_query_addr(priv, offset);
315 *eeprom_ch_index = iwl_eeprom_band_7;
316 break;
317 default:
318 BUG();
319 return;
320 }
321}
322
323#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
324 ? # x " " : "")
325/**
326 * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
327 *
328 * Does not set up a command, or touch hardware.
329 */
330static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
331 enum ieee80211_band band, u16 channel,
332 const struct iwl_eeprom_channel *eeprom_ch,
333 u8 clear_ht40_extension_channel)
334{
335 struct iwl_channel_info *ch_info;
336
337 ch_info = (struct iwl_channel_info *)
338 iwl_legacy_get_channel_info(priv, band, channel);
339
340 if (!iwl_legacy_is_channel_valid(ch_info))
341 return -1;
342
343 IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
344 " Ad-Hoc %ssupported\n",
345 ch_info->channel,
346 iwl_legacy_is_channel_a_band(ch_info) ?
347 "5.2" : "2.4",
348 CHECK_AND_PRINT(IBSS),
349 CHECK_AND_PRINT(ACTIVE),
350 CHECK_AND_PRINT(RADAR),
351 CHECK_AND_PRINT(WIDE),
352 CHECK_AND_PRINT(DFS),
353 eeprom_ch->flags,
354 eeprom_ch->max_power_avg,
355 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
356 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
357 "" : "not ");
358
359 ch_info->ht40_eeprom = *eeprom_ch;
360 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
361 ch_info->ht40_flags = eeprom_ch->flags;
362 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
363 ch_info->ht40_extension_channel &=
364 ~clear_ht40_extension_channel;
365
366 return 0;
367}
368
369#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
370 ? # x " " : "")
371
372/**
373 * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
374 */
375int iwl_legacy_init_channel_map(struct iwl_priv *priv)
376{
377 int eeprom_ch_count = 0;
378 const u8 *eeprom_ch_index = NULL;
379 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
380 int band, ch;
381 struct iwl_channel_info *ch_info;
382
383 if (priv->channel_count) {
384 IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
385 return 0;
386 }
387
388 IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
389
390 priv->channel_count =
391 ARRAY_SIZE(iwl_eeprom_band_1) +
392 ARRAY_SIZE(iwl_eeprom_band_2) +
393 ARRAY_SIZE(iwl_eeprom_band_3) +
394 ARRAY_SIZE(iwl_eeprom_band_4) +
395 ARRAY_SIZE(iwl_eeprom_band_5);
396
397 IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
398 priv->channel_count);
399
400 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
401 priv->channel_count, GFP_KERNEL);
402 if (!priv->channel_info) {
403 IWL_ERR(priv, "Could not allocate channel_info\n");
404 priv->channel_count = 0;
405 return -ENOMEM;
406 }
407
408 ch_info = priv->channel_info;
409
410 /* Loop through the 5 EEPROM bands adding them in order to the
411	 * channel map we maintain (which contains additional information
412	 * beyond what is in the EEPROM) */
413 for (band = 1; band <= 5; band++) {
414
415 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
416 &eeprom_ch_info, &eeprom_ch_index);
417
418 /* Loop through each band adding each of the channels */
419 for (ch = 0; ch < eeprom_ch_count; ch++) {
420 ch_info->channel = eeprom_ch_index[ch];
421 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
422 IEEE80211_BAND_5GHZ;
423
424 /* permanently store EEPROM's channel regulatory flags
425 * and max power in channel info database. */
426 ch_info->eeprom = eeprom_ch_info[ch];
427
428 /* Copy the run-time flags so they are there even on
429 * invalid channels */
430 ch_info->flags = eeprom_ch_info[ch].flags;
431			/* First mark ht40 as not enabled, then enable it
432			 * channel by channel below */
433 ch_info->ht40_extension_channel =
434 IEEE80211_CHAN_NO_HT40;
435
436 if (!(iwl_legacy_is_channel_valid(ch_info))) {
437 IWL_DEBUG_EEPROM(priv,
438 "Ch. %d Flags %x [%sGHz] - "
439 "No traffic\n",
440 ch_info->channel,
441 ch_info->flags,
442 iwl_legacy_is_channel_a_band(ch_info) ?
443 "5.2" : "2.4");
444 ch_info++;
445 continue;
446 }
447
448 /* Initialize regulatory-based run-time data */
449 ch_info->max_power_avg = ch_info->curr_txpow =
450 eeprom_ch_info[ch].max_power_avg;
451 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
452 ch_info->min_power = 0;
453
454 IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
455 "%s%s%s%s%s%s(0x%02x %ddBm):"
456 " Ad-Hoc %ssupported\n",
457 ch_info->channel,
458 iwl_legacy_is_channel_a_band(ch_info) ?
459 "5.2" : "2.4",
460 CHECK_AND_PRINT_I(VALID),
461 CHECK_AND_PRINT_I(IBSS),
462 CHECK_AND_PRINT_I(ACTIVE),
463 CHECK_AND_PRINT_I(RADAR),
464 CHECK_AND_PRINT_I(WIDE),
465 CHECK_AND_PRINT_I(DFS),
466 eeprom_ch_info[ch].flags,
467 eeprom_ch_info[ch].max_power_avg,
468 ((eeprom_ch_info[ch].
469 flags & EEPROM_CHANNEL_IBSS)
470 && !(eeprom_ch_info[ch].
471 flags & EEPROM_CHANNEL_RADAR))
472 ? "" : "not ");
473
474 /* Set the tx_power_user_lmt to the highest power
475 * supported by any channel */
476 if (eeprom_ch_info[ch].max_power_avg >
477 priv->tx_power_user_lmt)
478 priv->tx_power_user_lmt =
479 eeprom_ch_info[ch].max_power_avg;
480
481 ch_info++;
482 }
483 }
484
485 /* Check if we do have HT40 channels */
486 if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
487 EEPROM_REGULATORY_BAND_NO_HT40 &&
488 priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
489 EEPROM_REGULATORY_BAND_NO_HT40)
490 return 0;
491
492 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
493 for (band = 6; band <= 7; band++) {
494 enum ieee80211_band ieeeband;
495
496 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
497 &eeprom_ch_info, &eeprom_ch_index);
498
499 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
500 ieeeband =
501 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
502
503 /* Loop through each band adding each of the channels */
504 for (ch = 0; ch < eeprom_ch_count; ch++) {
505 /* Set up driver's info for lower half */
506 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
507 eeprom_ch_index[ch],
508 &eeprom_ch_info[ch],
509 IEEE80211_CHAN_NO_HT40PLUS);
510
511 /* Set up driver's info for upper half */
512 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
513 eeprom_ch_index[ch] + 4,
514 &eeprom_ch_info[ch],
515 IEEE80211_CHAN_NO_HT40MINUS);
516 }
517 }
518
519 return 0;
520}
521EXPORT_SYMBOL(iwl_legacy_init_channel_map);
522
523/*
524 * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
525 */
526void iwl_legacy_free_channel_map(struct iwl_priv *priv)
527{
528 kfree(priv->channel_info);
529 priv->channel_count = 0;
530}
531EXPORT_SYMBOL(iwl_legacy_free_channel_map);
532
533/**
534 * iwl_legacy_get_channel_info - Find driver's private channel info
535 *
536 * Based on band and channel number.
537 */
538const struct
539iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
540 enum ieee80211_band band, u16 channel)
541{
542 int i;
543
544 switch (band) {
545 case IEEE80211_BAND_5GHZ:
546 for (i = 14; i < priv->channel_count; i++) {
547 if (priv->channel_info[i].channel == channel)
548 return &priv->channel_info[i];
549 }
550 break;
551 case IEEE80211_BAND_2GHZ:
552 if (channel >= 1 && channel <= 14)
553 return &priv->channel_info[channel - 1];
554 break;
555 default:
556 BUG();
557 }
558
559 return NULL;
560}
561EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
new file mode 100644
index 000000000000..0744f8da63b4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
@@ -0,0 +1,344 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_eeprom_h__
64#define __iwl_legacy_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
111
112/* SKU Capabilities */
113/* 3945 only */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __packed;
123
124/* 3945 Specific */
125#define EEPROM_3945_EEPROM_VERSION (0x2f)
126
127/* 4965 has two radio transmitters (and 3 radio receivers) */
128#define EEPROM_TX_POWER_TX_CHAINS (2)
129
130/* 4965 has room for up to 8 sets of txpower calibration data */
131#define EEPROM_TX_POWER_BANDS (8)
132
133/* 4965 factory calibration measures txpower gain settings for
134 * each of 3 target output levels */
135#define EEPROM_TX_POWER_MEASUREMENTS (3)
136
137/* 4965 Specific */
138/* 4965 driver does not work with txpower calibration version < 5 */
139#define EEPROM_4965_TX_POWER_VERSION (5)
140#define EEPROM_4965_EEPROM_VERSION (0x2f)
141#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
142#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
143#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
144#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
145
146/* 2.4 GHz */
147extern const u8 iwl_eeprom_band_1[14];
148
149/*
150 * factory calibration data for one txpower level, on one channel,
151 * measured on one of the 2 tx chains (radio transmitter and associated
152 * antenna). EEPROM contains:
153 *
154 * 1) Temperature (degrees Celsius) of device when measurement was made.
155 *
156 * 2) Gain table index used to achieve the target measurement power.
157 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
158 *
159 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
160 *
161 * 4) RF power amplifier detector level measurement (not used).
162 */
163struct iwl_eeprom_calib_measure {
164 u8 temperature; /* Device temperature (Celsius) */
165 u8 gain_idx; /* Index into gain table */
166 u8 actual_pow; /* Measured RF output power, half-dBm */
167 s8 pa_det; /* Power amp detector level (not used) */
168} __packed;
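Since actual_pow above is stored in half-dBm, a tiny illustrative conversion (hypothetical helper, not driver code) is:

/* Sketch: convert a half-dBm EEPROM measurement to whole dBm ("34" -> 17). */
static inline int example_half_dbm_to_dbm(u8 half_dbm)
{
	return half_dbm / 2;
}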
169
170
171/*
172 * measurement set for one channel. EEPROM contains:
173 *
174 * 1) Channel number measured
175 *
176 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
177 * (a.k.a. "tx chains") (6 measurements altogether)
178 */
179struct iwl_eeprom_calib_ch_info {
180 u8 ch_num;
181 struct iwl_eeprom_calib_measure
182 measurements[EEPROM_TX_POWER_TX_CHAINS]
183 [EEPROM_TX_POWER_MEASUREMENTS];
184} __packed;
185
186/*
187 * txpower subband info.
188 *
189 * For each frequency subband, EEPROM contains the following:
190 *
191 * 1) First and last channels within range of the subband. "0" values
192 * indicate that this sample set is not being used.
193 *
194 * 2) Sample measurement sets for 2 channels close to the range endpoints.
195 */
196struct iwl_eeprom_calib_subband_info {
197 u8 ch_from; /* channel number of lowest channel in subband */
198 u8 ch_to; /* channel number of highest channel in subband */
199 struct iwl_eeprom_calib_ch_info ch1;
200 struct iwl_eeprom_calib_ch_info ch2;
201} __packed;
202
203
204/*
205 * txpower calibration info. EEPROM contains:
206 *
207 * 1) Factory-measured saturation power levels (maximum levels at which
208 * tx power amplifier can output a signal without too much distortion).
209 * There is one level for 2.4 GHz band and one for 5 GHz band. These
210 * values apply to all channels within each of the bands.
211 *
212 * 2) Factory-measured power supply voltage level. This is assumed to be
213 * constant (i.e. same value applies to all channels/bands) while the
214 * factory measurements are being made.
215 *
216 * 3) Up to 8 sets of factory-measured txpower calibration values.
217 * These are for different frequency ranges, since txpower gain
218 * characteristics of the analog radio circuitry vary with frequency.
219 *
220 * Not all sets need to be filled with data;
221 * struct iwl_eeprom_calib_subband_info contains range of channels
222 * (0 if unused) for each set of data.
223 */
224struct iwl_eeprom_calib_info {
225 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
226 u8 saturation_power52; /* half-dBm */
227 __le16 voltage; /* signed */
228 struct iwl_eeprom_calib_subband_info
229 band_info[EEPROM_TX_POWER_BANDS];
230} __packed;
231
232
233/* General */
234#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
235#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
236#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
237#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
238#define EEPROM_VERSION (2*0x44) /* 2 bytes */
239#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
240#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
241#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
242#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
243#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
244
245/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
246#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
247#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
248#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
249#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
250#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
251#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
252
253#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
254#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
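/*
 * Illustrative sketch only: decoding the 16-bit word read from offset
 * EEPROM_RADIO_CONFIG with the masks above.  The function name and the
 * pr_info() call are example glue, not part of this header.
 */
static inline void example_decode_radio_config(u16 radio_cfg)
{
        pr_info("RF type %d step %d dash %d pnum %d tx_ant 0x%x rx_ant 0x%x\n",
                EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
                EEPROM_RF_CFG_STEP_MSK(radio_cfg),
                EEPROM_RF_CFG_DASH_MSK(radio_cfg),
                EEPROM_RF_CFG_PNUM_MSK(radio_cfg),
                EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg),
                EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg));
}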
255
256/*
257 * Per-channel regulatory data.
258 *
259 * Each channel that *might* be supported by iwl has a fixed location
260 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
261 * txpower (MSB).
262 *
263 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
264 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
265 *
266 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
267 */
268#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
269#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
270#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
271
272/*
273 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
274 * 5.0 GHz channels 7, 8, 11, 12, 16
275 * (4915-5080MHz) (none of these is ever supported)
276 */
277#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
278#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
279
280/*
281 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
282 * (5170-5320MHz)
283 */
284#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
285#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
286
287/*
288 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
289 * (5500-5700MHz)
290 */
291#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
292#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
293
294/*
295 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
296 * (5725-5825MHz)
297 */
298#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
299#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
300
301/*
302 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
303 *
304 * The channel listed is the center of the lower 20 MHz half of the channel.
305 * The overall center frequency is actually 2 channels (10 MHz) above that,
306 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
307 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
308 * and the overall HT40 channel width centers on channel 3.
309 *
310 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
311 * control channel to which to tune. RXON also specifies whether the
312 * control channel is the upper or lower half of a HT40 channel.
313 *
314 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
315 */
316#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
317
318/*
319 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
320 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
321 */
322#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
323
324#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
325
326struct iwl_eeprom_ops {
327 const u32 regulatory_bands[7];
328 int (*acquire_semaphore) (struct iwl_priv *priv);
329 void (*release_semaphore) (struct iwl_priv *priv);
330};
331
332
333int iwl_legacy_eeprom_init(struct iwl_priv *priv);
334void iwl_legacy_eeprom_free(struct iwl_priv *priv);
335const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
336 size_t offset);
337u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
338int iwl_legacy_init_channel_map(struct iwl_priv *priv);
339void iwl_legacy_free_channel_map(struct iwl_priv *priv);
340const struct iwl_channel_info *iwl_legacy_get_channel_info(
341 const struct iwl_priv *priv,
342 enum ieee80211_band band, u16 channel);
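/*
 * Illustrative usage sketch of the helpers declared above: read the EEPROM
 * version word and the factory MAC address from the cached EEPROM image.
 * The 6-byte copy and the pr_debug() call are example glue, not this API.
 */
static inline void example_read_eeprom_basics(struct iwl_priv *priv, u8 *mac)
{
        u16 version = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
        const u8 *addr = iwl_legacy_eeprom_query_addr(priv, EEPROM_MAC_ADDRESS);

        memcpy(mac, addr, 6);   /* ETH_ALEN */
        pr_debug("EEPROM version 0x%04x\n", version);
}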
343
344#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
new file mode 100644
index 000000000000..4e20c7e5c883
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-fh.h
@@ -0,0 +1,513 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_fh_h__
64#define __iwl_legacy_fh_h__
65
66/****************************/
67/* Flow Handler Definitions */
68/****************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH_MEM_LOWER_BOUND (0x1000)
75#define FH_MEM_UPPER_BOUND (0x2000)
76
77/**
78 * Keep-Warm (KW) buffer base address.
79 *
80 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
82 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
83 * from going into a power-savings mode that would cause higher DRAM latency,
84 * and possible data over/under-runs, before all Tx/Rx is complete.
85 *
86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
87 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
88 * automatically invokes keep-warm accesses when normal accesses might not
89 * be sufficient to maintain fast DRAM response.
90 *
91 * Bit fields:
92 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
93 */
94#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
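/*
 * Illustrative sketch, not the driver's actual allocation path: the kw_v_addr,
 * kw_p_addr and dev parameters are assumptions made for this example, and
 * iwl_legacy_write_direct32() comes from iwl-io.h.  The real point is the
 * ">> 4" when programming bits [35:4] of the 4K-aligned physical address
 * into FH_KW_MEM_ADDR_REG.
 */
static int example_kw_setup(struct iwl_priv *priv, struct device *dev,
                            void **kw_v_addr, dma_addr_t *kw_p_addr)
{
        /* IWL_KW_SIZE (4 KiB) is defined near the end of this file */
        *kw_v_addr = dma_alloc_coherent(dev, IWL_KW_SIZE, kw_p_addr,
                                        GFP_KERNEL);
        if (!*kw_v_addr)
                return -ENOMEM;

        /* program bits [35:4] of the 4K-aligned physical address */
        iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG,
                                  (u32)(*kw_p_addr >> 4));
        return 0;
}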
95
96
97/**
98 * TFD Circular Buffers Base (CBBC) addresses
99 *
100 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
104 * aligned (address bits 0-7 must be 0).
105 *
106 * Bit fields in each pointer register:
107 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
108 */
109#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
110#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
111
112/* Find TFD CB base pointer for given queue (range 0-15). */
113#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
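/*
 * Illustrative sketch: telling the 4965 where one Tx queue's TFD circular
 * buffer lives.  tfd_phys stands in for the queue's DMA address, and
 * iwl_legacy_write_direct32() comes from iwl-io.h; only bits [35:8] are
 * written, since the buffer must be 256-byte aligned.
 */
static inline void example_set_tfd_cb_base(struct iwl_priv *priv, int txq_id,
                                           dma_addr_t tfd_phys)
{
        iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                                  (u32)(tfd_phys >> 8));
}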
114
115
116/**
117 * Rx SRAM Control and Status Registers (RSCSR)
118 *
119 * These registers provide handshake between driver and 4965 for the Rx queue
120 * (this queue handles *all* command responses, notifications, Rx data, etc.
121 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
125 * mapping between RBDs and RBs.
126 *
127 * Driver must allocate host DRAM memory for the following, and set the
128 * physical address of each into 4965 registers:
129 *
130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
131 * entries (although any power of 2, up to 4096, is selectable by driver).
132 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
133 * (typically 4K, although 8K or 16K are also selectable by driver).
134 * Driver sets up RB size and number of RBDs in the CB via Rx config
135 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
136 *
137 * Bit fields within one RBD:
138 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
139 *
140 * Driver sets physical address [35:8] of base of RBD circular buffer
141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
142 *
143 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
144 * (RBs) have been filled, via a "write pointer", actually the index of
145 * the RB's corresponding RBD within the circular buffer. Driver sets
146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
147 *
148 * Bit fields in lower dword of Rx status buffer (upper dword not used
149 * by driver; see struct iwl4965_shared, val0):
150 * 31-12: Not used by driver
151 * 11- 0: Index of last filled Rx buffer descriptor
152 * (4965 writes, driver reads this value)
153 *
154 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
155 * enter pointers to these RBs into contiguous RBD circular buffer entries,
156 * and update the 4965's "write" index register,
157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
158 *
159 * This "write" index corresponds to the *next* RBD that the driver will make
160 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
161 * the circular buffer. This value should initially be 0 (before preparing any
162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
163 * wrap back to 0 at the end of the circular buffer (but don't wrap before
164 * "read" index has advanced past 1! See below).
165 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
166 *
167 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
169 * to tell the driver the index of the latest filled RBD. The driver must
170 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
171 *
172 * The driver must also internally keep track of a third index, which is the
173 * next RBD to process. When receiving an Rx interrupt, driver should process
174 * all filled but unprocessed RBs up to, but not including, the RB
175 * corresponding to the "read" index. For example, if "read" index becomes "1",
176 * driver may process the RB pointed to by RBD 0. Depending on volume of
177 * traffic, there may be many RBs to process.
178 *
179 * If read index == write index, 4965 thinks there is no room to put new data.
180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
181 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
182 * and "read" indexes; that is, make sure that there are no more than 254
183 * buffers waiting to be filled.
184 */
185#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
186#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
187#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
188
189/**
190 * Physical base address of 8-byte Rx Status buffer.
191 * Bit fields:
192 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
193 */
194#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
195
196/**
197 * Physical base address of Rx Buffer Descriptor Circular Buffer.
198 * Bit fields:
199 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
200 */
201#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
202
203/**
204 * Rx write pointer (index, really!).
205 * Bit fields:
206 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
207 * NOTE: For 256-entry circular buffer, use only bits [7:0].
208 */
209#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
210#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
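/*
 * Illustrative sketch of the "write" index update described above.  The
 * caller tracks "write" (index of the next RBD it will prepare); the value
 * actually handed to the device is rounded down to a multiple of 8, as the
 * 4965 requires.  iwl_legacy_write_direct32() comes from iwl-io.h.
 */
static inline void example_rx_update_write_ptr(struct iwl_priv *priv,
                                               u32 write)
{
        u32 write_actual = write & ~0x7;        /* multiples of 8 only */

        iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
                                  write_actual);
}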
211
212
213/**
214 * Rx Config/Status Registers (RCSR)
215 * Rx Config Reg for channel 0 (only channel used)
216 *
217 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
218 * normal operation (see bit fields).
219 *
220 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
221 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
222 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
223 *
224 * Bit fields:
225 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
226 * '10' operate normally
227 * 29-24: reserved
228 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
229 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
230 * 19-18: reserved
231 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
232 * '10' 12K, '11' 16K.
233 * 15-14: reserved
234 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
235 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
236 * typical value 0x10 (about 1/2 msec)
237 * 3- 0: reserved
238 */
239#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
240#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
241#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
242
243#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
244
245#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
246#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
247#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
248#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
249#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
250#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
251
252#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
253#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
254#define RX_RB_TIMEOUT (0x10)
255
256#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
257#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
258#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
259
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
262#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
263#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
264
265#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
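/*
 * Illustrative sketch (close to what the 4965 Rx init path does): compose the
 * channel-0 Rx config for normal operation from the values above -- DMA
 * enabled, interrupt the host, 4K receive buffers, 256 RBDs, and the default
 * RB timeout.  RX_QUEUE_SIZE_LOG (8) is defined later in this file, and
 * iwl_legacy_write_direct32() comes from iwl-io.h.
 */
static inline void example_rx_config(struct iwl_priv *priv)
{
        iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
                (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                (RX_QUEUE_SIZE_LOG << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
}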
268
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
271/**
272 * Rx Shared Status Registers (RSSR)
273 *
274 * After stopping Rx DMA channel (writing 0 to
275 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
276 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
277 *
278 * Bit fields:
279 * 24: 1 = Channel 0 is idle
280 *
281 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
282 * contain default values that should not be altered by the driver.
283 */
284#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
285#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
286
287#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
288#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
289#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
290 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
291
292#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
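/*
 * Illustrative sketch of the stop sequence described above: clear the
 * channel-0 Rx config register, then poll the shared status register until
 * the channel-idle bit is set.  iwl_poll_direct_bit() and
 * iwl_legacy_write_direct32() come from iwl-io.h; the 1000 usec timeout is
 * an assumption for this example.
 */
static inline int example_rx_stop(struct iwl_priv *priv)
{
        iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

        return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}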
293
294#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
295
296/* TFDB Area - TFDs buffer table */
297#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
298#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
299#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
300#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
301#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
302
303/**
304 * Transmit DMA Channel Control/Status Registers (TCSR)
305 *
306 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
309 *
310 * To use a Tx DMA channel, driver must initialize its
311 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
312 *
313 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
314 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
315 *
316 * All other bits should be 0.
317 *
318 * Bit fields:
319 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
320 * '10' operate normally
321 * 29- 4: Reserved, set to "0"
322 * 3: Enable internal DMA requests (1, normal operation), disable (0)
323 * 2- 0: Reserved, set to "0"
324 */
325#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
327
328/* Find Control/Status reg for given Tx DMA/FIFO channel */
329#define FH49_TCSR_CHNL_NUM (7)
330#define FH50_TCSR_CHNL_NUM (8)
331
332/* TCSR: tx_config register values */
333#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
334 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
335#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
336 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
337#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
338 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
339
340#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
341#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
342
343#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
344#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
345
346#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
347#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
348#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
349
350#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
351#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
352#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
353
354#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
355#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
356#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
357
358#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
359#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
360#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
361
362#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
363#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
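/*
 * Illustrative sketch of the channel setup described above: DMA enable plus
 * the credit mechanism, every other bit left at 0.
 * iwl_legacy_write_direct32() comes from iwl-io.h.
 */
static inline void example_tx_chnl_enable(struct iwl_priv *priv, int chnl)
{
        iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE |
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
}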
364
365/**
366 * Tx Shared Status Registers (TSSR)
367 *
368 * After stopping Tx DMA channel (writing 0 to
369 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
370 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
371 * (channel's buffers empty | no pending requests).
372 *
373 * Bit fields:
374 * 31-24: 1 = Channel buffers empty (channel 7:0)
375 * 23-16: 1 = No pending requests (channel 7:0)
376 */
377#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
378#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
379
380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
381
382/**
383 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
384 * 31: Indicates an address error when accessing internal memory
385 * uCode/driver must write "1" in order to clear this flag
386 * 30: Indicates that Host did not send the expected number of dwords to FH
387 * uCode/driver must write "1" in order to clear this flag
388 * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
389 * command was received from the scheduler while the TRB was already full
390 * with previous command
391 * uCode/driver must write "1" in order to clear this flag
392 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
393 * bit is set, it indicates that the FH has received a full indication
394 * from the RTC TxFIFO and the current value of the TxCredit counter was
395 * not equal to zero. This means that the credit mechanism was not
396 * synchronized to the TxFIFO status
397 * uCode/driver must write "1" in order to clear this flag
398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400
401#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
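/*
 * Illustrative sketch: stop one Tx DMA channel and wait for it to drain,
 * using the idle mask above.  The helpers come from iwl-io.h and the
 * 1000 usec timeout is an assumption for this example.
 */
static inline int example_tx_chnl_stop(struct iwl_priv *priv, int chnl)
{
        iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);

        return iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
                                   FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
                                   1000);
}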
402
403/* Tx service channels */
404#define FH_SRVC_CHNL (9)
405#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
406#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
407#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
408 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
409
410#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
411/* Instruct FH to increment the retry count of a packet when
412 * it is brought from the memory to TX-FIFO
413 */
414#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
415
416#define RX_QUEUE_SIZE 256
417#define RX_QUEUE_MASK 255
418#define RX_QUEUE_SIZE_LOG 8
419
420/*
421 * RX related structures and functions
422 */
423#define RX_FREE_BUFFERS 64
424#define RX_LOW_WATERMARK 8
425
426/* Size of one Rx buffer in host DRAM */
427#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
428#define IWL_RX_BUF_SIZE_4K (4 * 1024)
429#define IWL_RX_BUF_SIZE_8K (8 * 1024)
430
431/**
432 * struct iwl_rb_status - receive buffer status
433 * host memory mapped FH registers
434 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
435 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
436 * @finished_rb_num [0:11] - Indicates the index of the current RB
437 * in which the last frame was written to
438 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
439 * which was transferred
440 */
441struct iwl_rb_status {
442 __le16 closed_rb_num;
443 __le16 closed_fr_num;
444 __le16 finished_rb_num;
445 __le16 finished_fr_nam;
446 __le32 __unused; /* 3945 only */
447} __packed;
448
449
450#define TFD_QUEUE_SIZE_MAX (256)
451#define TFD_QUEUE_SIZE_BC_DUP (64)
452#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
453#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
454#define IWL_NUM_OF_TBS 20
455
456static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
457{
458 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
459}
460/**
461 * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
462 *
463 * This structure contains the DMA address and length of one Tx buffer
464 *
465 * @lo: low [31:0] portion of the dma address of TX buffer
466 * every even is unaligned on 16 bit boundary
467 * @hi_n_len 0-3 [35:32] portion of dma
468 * 4-15 length of the tx buffer
469 */
470struct iwl_tfd_tb {
471 __le32 lo;
472 __le16 hi_n_len;
473} __packed;
474
475/**
476 * struct iwl_tfd
477 *
478 * Transmit Frame Descriptor (TFD)
479 *
480 * @ __reserved1[3] reserved
481 * @ num_tbs 0-4 number of active tbs
482 * 5 reserved
483 * 6-7 padding (not used)
484 * @ tbs[20] transmit frame buffer descriptors
485 * @ __pad padding
486 *
487 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
488 * Both driver and device share these circular buffers, each of which must be
489 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
490 *
491 * Driver must indicate the physical address of the base of each
492 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
493 *
494 * Each TFD contains pointer/size information for up to 20 data buffers
495 * in host DRAM. These buffers collectively contain the (one) frame described
496 * by the TFD. Each buffer must be a single contiguous block of memory within
497 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
498 * of (4K - 4). The 4965 concatenates all of a TFD's buffers into a single
499 * Tx frame, up to 8 KBytes in size.
500 *
501 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
502 */
503struct iwl_tfd {
504 u8 __reserved1[3];
505 u8 num_tbs;
506 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
507 __le32 __pad;
508} __packed;
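/*
 * Illustrative sketch of attaching one data buffer to a TFD, packing the
 * upper 4 address bits and the 12-bit length into hi_n_len as the
 * iwl_tfd_tb comment above describes.  put_unaligned_le32()
 * (<asm/unaligned.h>) is assumed to be available to the including file.
 */
static inline void example_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
                                      dma_addr_t addr, u16 len)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
        u16 hi_n_len = len << 4;

        put_unaligned_le32(addr, &tb->lo);
        hi_n_len |= iwl_legacy_get_dma_hi_addr(addr);

        tb->hi_n_len = cpu_to_le16(hi_n_len);
        tfd->num_tbs = idx + 1;
}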
509
510/* Keep Warm Size */
511#define IWL_KW_SIZE 0x1000 /* 4k */
512
513#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
new file mode 100644
index 000000000000..9d721cbda5bb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -0,0 +1,271 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <net/mac80211.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-core.h"
38
39
40const char *iwl_legacy_get_cmd_string(u8 cmd)
41{
42 switch (cmd) {
43 IWL_CMD(REPLY_ALIVE);
44 IWL_CMD(REPLY_ERROR);
45 IWL_CMD(REPLY_RXON);
46 IWL_CMD(REPLY_RXON_ASSOC);
47 IWL_CMD(REPLY_QOS_PARAM);
48 IWL_CMD(REPLY_RXON_TIMING);
49 IWL_CMD(REPLY_ADD_STA);
50 IWL_CMD(REPLY_REMOVE_STA);
51 IWL_CMD(REPLY_WEPKEY);
52 IWL_CMD(REPLY_3945_RX);
53 IWL_CMD(REPLY_TX);
54 IWL_CMD(REPLY_RATE_SCALE);
55 IWL_CMD(REPLY_LEDS_CMD);
56 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
57 IWL_CMD(REPLY_CHANNEL_SWITCH);
58 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
59 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
60 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
61 IWL_CMD(POWER_TABLE_CMD);
62 IWL_CMD(PM_SLEEP_NOTIFICATION);
63 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
64 IWL_CMD(REPLY_SCAN_CMD);
65 IWL_CMD(REPLY_SCAN_ABORT_CMD);
66 IWL_CMD(SCAN_START_NOTIFICATION);
67 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
68 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
69 IWL_CMD(BEACON_NOTIFICATION);
70 IWL_CMD(REPLY_TX_BEACON);
71 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
72 IWL_CMD(REPLY_BT_CONFIG);
73 IWL_CMD(REPLY_STATISTICS_CMD);
74 IWL_CMD(STATISTICS_NOTIFICATION);
75 IWL_CMD(CARD_STATE_NOTIFICATION);
76 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
77 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
78 IWL_CMD(SENSITIVITY_CMD);
79 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
80 IWL_CMD(REPLY_RX_PHY_CMD);
81 IWL_CMD(REPLY_RX_MPDU_CMD);
82 IWL_CMD(REPLY_RX);
83 IWL_CMD(REPLY_COMPRESSED_BA);
84 default:
85 return "UNKNOWN";
86
87 }
88}
89EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
90
91#define HOST_COMPLETE_TIMEOUT (HZ / 2)
92
93static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
94 struct iwl_device_cmd *cmd,
95 struct iwl_rx_packet *pkt)
96{
97 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
98 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
99 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
100 return;
101 }
102
103#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
104 switch (cmd->hdr.cmd) {
105 case REPLY_TX_LINK_QUALITY_CMD:
106 case SENSITIVITY_CMD:
107 IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
108 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
109 break;
110 default:
111 IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
112 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
113 }
114#endif
115}
116
117static int
118iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
119{
120 int ret;
121
122 BUG_ON(!(cmd->flags & CMD_ASYNC));
123
124 /* An asynchronous command can not expect an SKB to be set. */
125 BUG_ON(cmd->flags & CMD_WANT_SKB);
126
127 /* Assign a generic callback if one is not provided */
128 if (!cmd->callback)
129 cmd->callback = iwl_legacy_generic_cmd_callback;
130
131 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
132 return -EBUSY;
133
134 ret = iwl_legacy_enqueue_hcmd(priv, cmd);
135 if (ret < 0) {
136 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
137 iwl_legacy_get_cmd_string(cmd->id), ret);
138 return ret;
139 }
140 return 0;
141}
142
143int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
144{
145 int cmd_idx;
146 int ret;
147
148 BUG_ON(cmd->flags & CMD_ASYNC);
149
150 /* A synchronous command can not have a callback set. */
151 BUG_ON(cmd->callback);
152
153 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
154 iwl_legacy_get_cmd_string(cmd->id));
155 mutex_lock(&priv->sync_cmd_mutex);
156
157 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
158 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
159 iwl_legacy_get_cmd_string(cmd->id));
160
161 cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
162 if (cmd_idx < 0) {
163 ret = cmd_idx;
164 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
165 iwl_legacy_get_cmd_string(cmd->id), ret);
166 goto out;
167 }
168
169 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
170 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
171 HOST_COMPLETE_TIMEOUT);
172 if (!ret) {
173 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
174 IWL_ERR(priv,
175 "Error sending %s: time out after %dms.\n",
176 iwl_legacy_get_cmd_string(cmd->id),
177 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
178
179 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
180 IWL_DEBUG_INFO(priv,
181 "Clearing HCMD_ACTIVE for command %s\n",
182 iwl_legacy_get_cmd_string(cmd->id));
183 ret = -ETIMEDOUT;
184 goto cancel;
185 }
186 }
187
188 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
189 IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
190 iwl_legacy_get_cmd_string(cmd->id));
191 ret = -ECANCELED;
192 goto fail;
193 }
194 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
195 IWL_ERR(priv, "Command %s failed: FW Error\n",
196 iwl_legacy_get_cmd_string(cmd->id));
197 ret = -EIO;
198 goto fail;
199 }
200 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
201 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
202 iwl_legacy_get_cmd_string(cmd->id));
203 ret = -EIO;
204 goto cancel;
205 }
206
207 ret = 0;
208 goto out;
209
210cancel:
211 if (cmd->flags & CMD_WANT_SKB) {
212 /*
213 * Cancel the CMD_WANT_SKB flag for the cmd in the
214 * TX cmd queue. Otherwise in case the cmd comes
215 * in later, it will possibly set an invalid
216 * address (cmd->meta.source).
217 */
218 priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
219 ~CMD_WANT_SKB;
220 }
221fail:
222 if (cmd->reply_page) {
223 iwl_legacy_free_pages(priv, cmd->reply_page);
224 cmd->reply_page = 0;
225 }
226out:
227 mutex_unlock(&priv->sync_cmd_mutex);
228 return ret;
229}
230EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
231
232int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
233{
234 if (cmd->flags & CMD_ASYNC)
235 return iwl_legacy_send_cmd_async(priv, cmd);
236
237 return iwl_legacy_send_cmd_sync(priv, cmd);
238}
239EXPORT_SYMBOL(iwl_legacy_send_cmd);
240
241int
242iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
243{
244 struct iwl_host_cmd cmd = {
245 .id = id,
246 .len = len,
247 .data = data,
248 };
249
250 return iwl_legacy_send_cmd_sync(priv, &cmd);
251}
252EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
253
254int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
255 u8 id, u16 len, const void *data,
256 void (*callback)(struct iwl_priv *priv,
257 struct iwl_device_cmd *cmd,
258 struct iwl_rx_packet *pkt))
259{
260 struct iwl_host_cmd cmd = {
261 .id = id,
262 .len = len,
263 .data = data,
264 };
265
266 cmd.flags |= CMD_ASYNC;
267 cmd.callback = callback;
268
269 return iwl_legacy_send_cmd_async(priv, &cmd);
270}
271EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
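/*
 * Illustrative usage sketch only.  The payload below is a raw placeholder
 * buffer rather than one of the real command structs from iwl-commands.h;
 * it just shows the calling convention of the two wrappers above.
 */
static int __maybe_unused example_send_commands(struct iwl_priv *priv)
{
        u8 payload[4] = { 0 };  /* placeholder command body */
        int ret;

        /* synchronous: sleeps until the uCode acknowledges the command */
        ret = iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
                                      sizeof(payload), payload);
        if (ret)
                return ret;

        /* asynchronous: returns at once; the generic callback handles the reply */
        return iwl_legacy_send_cmd_pdu_async(priv, REPLY_BT_CONFIG,
                                             sizeof(payload), payload, NULL);
}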
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
new file mode 100644
index 000000000000..02132e755831
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h
@@ -0,0 +1,181 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_legacy_helpers_h__
31#define __iwl_legacy_helpers_h__
32
33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37
38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
39
40
41static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
42 struct ieee80211_hw *hw)
43{
44 return &hw->conf;
45}
46
47/**
48 * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
49 * @index -- current index
50 * @n_bd -- total number of entries in queue (must be power of 2)
51 */
52static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
53{
54 return ++index & (n_bd - 1);
55}
56
57/**
58 * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
59 * @index -- current index
60 * @n_bd -- total number of entries in queue (must be power of 2)
61 */
62static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
63{
64 return --index & (n_bd - 1);
65}
66
67/* TODO: Move fw_desc functions to iwl-pci.ko */
68static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
69 struct fw_desc *desc)
70{
71 if (desc->v_addr)
72 dma_free_coherent(&pci_dev->dev, desc->len,
73 desc->v_addr, desc->p_addr);
74 desc->v_addr = NULL;
75 desc->len = 0;
76}
77
78static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
79 struct fw_desc *desc)
80{
81 if (!desc->len) {
82 desc->v_addr = NULL;
83 return -EINVAL;
84 }
85
86 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
87 &desc->p_addr, GFP_KERNEL);
88 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
89}
90
91/*
92 * we have 8 bits used like this:
93 *
94 * 7 6 5 4 3 2 1 0
95 * | | | | | | | |
96 * | | | | | | +-+-------- AC queue (0-3)
97 * | | | | | |
98 * | +-+-+-+-+------------ HW queue ID
99 * |
100 * +---------------------- unused
101 */
102static inline void
103iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
104{
105 BUG_ON(ac > 3); /* only have 2 bits */
106 BUG_ON(hwq > 31); /* only use 5 bits */
107
108 txq->swq_id = (hwq << 2) | ac;
109}
110
111static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
112 struct iwl_tx_queue *txq)
113{
114 u8 queue = txq->swq_id;
115 u8 ac = queue & 3;
116 u8 hwq = (queue >> 2) & 0x1f;
117
118 if (test_and_clear_bit(hwq, priv->queue_stopped))
119 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
120 ieee80211_wake_queue(priv->hw, ac);
121}
122
123static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
124 struct iwl_tx_queue *txq)
125{
126 u8 queue = txq->swq_id;
127 u8 ac = queue & 3;
128 u8 hwq = (queue >> 2) & 0x1f;
129
130 if (!test_and_set_bit(hwq, priv->queue_stopped))
131 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
132 ieee80211_stop_queue(priv->hw, ac);
133}
134
135#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
136#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
137
138static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
139{
140 clear_bit(STATUS_INT_ENABLED, &priv->status);
141
142 /* disable interrupts from uCode/NIC to host */
143 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
144
145 /* acknowledge/clear/reset any interrupts still pending
146 * from uCode or flow handler (Rx/Tx DMA) */
147 iwl_write32(priv, CSR_INT, 0xffffffff);
148 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
149 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
150}
151
152static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
153{
154 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
155 set_bit(STATUS_INT_ENABLED, &priv->status);
156 iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
157}
158
159/**
160 * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
161 * @priv -- pointer to iwl_priv data structure
162 * @tsf_bits -- number of bits to shift for masking
163 */
164static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
165 u16 tsf_bits)
166{
167 return (1 << tsf_bits) - 1;
168}
169
170/**
171 * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
172 * @priv -- pointer to iwl_priv data structure
173 * @tsf_bits -- number of bits need to shift for masking)
174 */
175static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
176 u16 tsf_bits)
177{
178 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
179}
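/*
 * Illustrative sketch: the two masks above split a 32-bit beacon time value
 * into its low (tsf_bits wide) and high (32 - tsf_bits wide) parts.  The
 * output parameter names are example glue; tsf_bits comes from the device's
 * hw params in the real drivers.
 */
static inline void example_split_beacon_time(struct iwl_priv *priv,
                                             u32 beacon_time, u16 tsf_bits,
                                             u32 *low_part, u32 *high_part)
{
        *low_part = beacon_time &
                    iwl_legacy_beacon_time_mask_low(priv, tsf_bits);
        *high_part = (beacon_time &
                      iwl_legacy_beacon_time_mask_high(priv, tsf_bits))
                     >> tsf_bits;
}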
180
181#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
new file mode 100644
index 000000000000..5cc5d342914f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-io.h
@@ -0,0 +1,545 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_io_h__
30#define __iwl_legacy_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-devtrace.h"
37
38/*
39 * IO, register, and NIC memory access functions
40 *
41 * NOTE on naming convention and macro usage for these
42 *
43 * A single _ prefix before an access function means that no state
44 * check or debug information is printed when that function is called.
45 *
46 * A double __ prefix before an access function means that state is checked
47 * and the current line number and caller function name are printed in addition
48 * to any other debug output.
49 *
50 * The non-prefixed name is the #define that maps the caller into a
51 * #define that provides the caller's name and __LINE__ to the double
52 * prefix version.
53 *
54 * If you wish to call the function without any debug or state checking,
55 * you should use the single _ prefix version (as is used by dependent IO
56 * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
57 * _iwl_legacy_read32.)
58 *
59 * These declarations are *extremely* useful in quickly isolating code deltas
60 * which result in misconfiguration of the hardware I/O. In combination with
61 * git-bisect and the IO debug level you can quickly determine the specific
62 * commit which breaks the IO sequence to the hardware.
63 *
64 */
65
66static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
67{
68 trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
69 iowrite8(val, priv->hw_base + ofs);
70}
71
72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
73static inline void
74__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
75 u32 ofs, u8 val)
76{
77 IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
78 _iwl_legacy_write8(priv, ofs, val);
79}
80#define iwl_write8(priv, ofs, val) \
81 __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
82#else
83#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
84#endif
85
86
87static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
88{
89 trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
90 iowrite32(val, priv->hw_base + ofs);
91}
92
93#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
94static inline void
95__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
96 u32 ofs, u32 val)
97{
98 IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
99 _iwl_legacy_write32(priv, ofs, val);
100}
101#define iwl_write32(priv, ofs, val) \
102 __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
103#else
104#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
105#endif
106
107static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
108{
109 u32 val = ioread32(priv->hw_base + ofs);
110 trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
111 return val;
112}
113
114#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
115static inline u32
116__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
117{
118 IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
119 return _iwl_legacy_read32(priv, ofs);
120}
121#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
122#else
123#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
124#endif
125
126#define IWL_POLL_INTERVAL 10 /* microseconds */
127static inline int
128_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
129 u32 bits, u32 mask, int timeout)
130{
131 int t = 0;
132
133 do {
134 if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
135 return t;
136 udelay(IWL_POLL_INTERVAL);
137 t += IWL_POLL_INTERVAL;
138 } while (t < timeout);
139
140 return -ETIMEDOUT;
141}
142#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
143static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
144 struct iwl_priv *priv, u32 addr,
145 u32 bits, u32 mask, int timeout)
146{
147 int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
148 IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
149 addr, bits, mask,
150 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
151 return ret;
152}
153#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
154 __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
155 bits, mask, timeout)
156#else
157#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
158#endif
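/*
 * Illustrative usage sketch: wait for a status bit with iwl_poll_bit().  The
 * CSR register and bit names are assumed to come from the driver's CSR
 * header, and the 25000 usec timeout is an example value.
 */
static inline int example_wait_nic_ready(struct iwl_priv *priv)
{
        return iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                            CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                            25000);
}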
159
160static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
161{
162 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
163}
164#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
165static inline void __iwl_legacy_set_bit(const char *f, u32 l,
166 struct iwl_priv *priv, u32 reg, u32 mask)
167{
168 u32 val = _iwl_legacy_read32(priv, reg) | mask;
169 IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
170 mask, val);
171 _iwl_legacy_write32(priv, reg, val);
172}
173static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
174{
175 unsigned long reg_flags;
176
177 spin_lock_irqsave(&p->reg_lock, reg_flags);
178 __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
179 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
180}
181#else
182static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
183{
184 unsigned long reg_flags;
185
186 spin_lock_irqsave(&p->reg_lock, reg_flags);
187 _iwl_legacy_set_bit(p, r, m);
188 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
189}
190#endif
191
192static inline void
193_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
194{
195 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
196}
197#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
198static inline void
199__iwl_legacy_clear_bit(const char *f, u32 l,
200 struct iwl_priv *priv, u32 reg, u32 mask)
201{
202 u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
203 IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
204 _iwl_legacy_write32(priv, reg, val);
205}
206static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
207{
208 unsigned long reg_flags;
209
210 spin_lock_irqsave(&p->reg_lock, reg_flags);
211 __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
212 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
213}
214#else
215static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
216{
217 unsigned long reg_flags;
218
219 spin_lock_irqsave(&p->reg_lock, reg_flags);
220 _iwl_legacy_clear_bit(p, r, m);
221 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
222}
223#endif
224
225static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
226{
227 int ret;
228 u32 val;
229
230 /* this bit wakes up the NIC */
231 _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
232 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
233
234 /*
235 * These bits say the device is running, and should keep running for
236 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
237 * but they do not indicate that embedded SRAM is restored yet;
238 * 3945 and 4965 have volatile SRAM, and must save/restore contents
239 * to/from host DRAM when sleeping/waking for power-saving.
240 * Each direction takes approximately 1/4 millisecond; with this
241 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
242 * series of register accesses are expected (e.g. reading Event Log),
243 * to keep device from sleeping.
244 *
245 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
246 * SRAM is okay/restored. We don't check that here because this call
247 * is just for hardware register access; but GP1 MAC_SLEEP check is a
248 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
249 *
250 */
251 ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
252 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
253 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
254 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
255 if (ret < 0) {
256 val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
257 IWL_ERR(priv,
258 "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
259 _iwl_legacy_write32(priv, CSR_RESET,
260 CSR_RESET_REG_FLAG_FORCE_NMI);
261 return -EIO;
262 }
263
264 return 0;
265}
266
267#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
268static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
269 struct iwl_priv *priv)
270{
271 IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
272 return _iwl_legacy_grab_nic_access(priv);
273}
274#define iwl_grab_nic_access(priv) \
275 __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
276#else
277#define iwl_grab_nic_access(priv) \
278 _iwl_legacy_grab_nic_access(priv)
279#endif
280
281static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
282{
283 _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
284 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
285}
286#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
287static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
288 struct iwl_priv *priv)
289{
290
291 IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
292 _iwl_legacy_release_nic_access(priv);
293}
294#define iwl_release_nic_access(priv) \
295 __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
296#else
297#define iwl_release_nic_access(priv) \
298 _iwl_legacy_release_nic_access(priv)
299#endif
300
301static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
302{
303 return _iwl_legacy_read32(priv, reg);
304}
305#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
306static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
307 struct iwl_priv *priv, u32 reg)
308{
309 u32 value = _iwl_legacy_read_direct32(priv, reg);
310 IWL_DEBUG_IO(priv,
311 "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
312 f, l);
313 return value;
314}
315static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
316{
317 u32 value;
318 unsigned long reg_flags;
319
320 spin_lock_irqsave(&priv->reg_lock, reg_flags);
321 iwl_grab_nic_access(priv);
322 value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
323 iwl_release_nic_access(priv);
324 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
325 return value;
326}
327
328#else
329static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
330{
331 u32 value;
332 unsigned long reg_flags;
333
334 spin_lock_irqsave(&priv->reg_lock, reg_flags);
335 iwl_grab_nic_access(priv);
336 value = _iwl_legacy_read_direct32(priv, reg);
337 iwl_release_nic_access(priv);
338 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
339 return value;
340
341}
342#endif
343
344static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
345 u32 reg, u32 value)
346{
347 _iwl_legacy_write32(priv, reg, value);
348}
349static inline void
350iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
351{
352 unsigned long reg_flags;
353
354 spin_lock_irqsave(&priv->reg_lock, reg_flags);
355 if (!iwl_grab_nic_access(priv)) {
356 _iwl_legacy_write_direct32(priv, reg, value);
357 iwl_release_nic_access(priv);
358 }
359 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
360}
361
362static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
363 u32 reg, u32 len, u32 *values)
364{
365 u32 count = sizeof(u32);
366
367 if ((priv != NULL) && (values != NULL)) {
368 for (; 0 < len; len -= count, reg += count, values++)
369 iwl_legacy_write_direct32(priv, reg, *values);
370 }
371}
372
373static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
374 u32 mask, int timeout)
375{
376 int t = 0;
377
378 do {
379 if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
380 return t;
381 udelay(IWL_POLL_INTERVAL);
382 t += IWL_POLL_INTERVAL;
383 } while (t < timeout);
384
385 return -ETIMEDOUT;
386}
387
388#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
389static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
390 struct iwl_priv *priv,
391 u32 addr, u32 mask, int timeout)
392{
393 int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
394
395 if (unlikely(ret == -ETIMEDOUT))
396 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
397 "timedout - %s %d\n", addr, mask, f, l);
398 else
399 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
400 "- %s %d\n", addr, mask, ret, f, l);
401 return ret;
402}
403#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
404__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
405#else
406#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
407#endif
408
409static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
410{
411 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
412 rmb();
413 return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
414}
415static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
416{
417 unsigned long reg_flags;
418 u32 val;
419
420 spin_lock_irqsave(&priv->reg_lock, reg_flags);
421 iwl_grab_nic_access(priv);
422 val = _iwl_legacy_read_prph(priv, reg);
423 iwl_release_nic_access(priv);
424 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
425 return val;
426}
427
428static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
429 u32 addr, u32 val)
430{
431 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
432 ((addr & 0x0000FFFF) | (3 << 24)));
433 wmb();
434 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
435}
436
437static inline void
438iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
439{
440 unsigned long reg_flags;
441
442 spin_lock_irqsave(&priv->reg_lock, reg_flags);
443 if (!iwl_grab_nic_access(priv)) {
444 _iwl_legacy_write_prph(priv, addr, val);
445 iwl_release_nic_access(priv);
446 }
447 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
448}
449
450#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
451_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
452
453static inline void
454iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
455{
456 unsigned long reg_flags;
457
458 spin_lock_irqsave(&priv->reg_lock, reg_flags);
459 iwl_grab_nic_access(priv);
460 _iwl_legacy_set_bits_prph(priv, reg, mask);
461 iwl_release_nic_access(priv);
462 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
463}
464
465#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
466_iwl_legacy_write_prph(priv, reg, \
467 ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
468
469static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
470 u32 bits, u32 mask)
471{
472 unsigned long reg_flags;
473
474 spin_lock_irqsave(&priv->reg_lock, reg_flags);
475 iwl_grab_nic_access(priv);
476 _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
477 iwl_release_nic_access(priv);
478 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
479}
480
481static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
482 *priv, u32 reg, u32 mask)
483{
484 unsigned long reg_flags;
485 u32 val;
486
487 spin_lock_irqsave(&priv->reg_lock, reg_flags);
488 iwl_grab_nic_access(priv);
489 val = _iwl_legacy_read_prph(priv, reg);
490 _iwl_legacy_write_prph(priv, reg, (val & ~mask));
491 iwl_release_nic_access(priv);
492 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
493}
494
495static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
496{
497 unsigned long reg_flags;
498 u32 value;
499
500 spin_lock_irqsave(&priv->reg_lock, reg_flags);
501 iwl_grab_nic_access(priv);
502
503 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
504 rmb();
505 value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
506
507 iwl_release_nic_access(priv);
508 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
509 return value;
510}
511
512static inline void
513iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
514{
515 unsigned long reg_flags;
516
517 spin_lock_irqsave(&priv->reg_lock, reg_flags);
518 if (!iwl_grab_nic_access(priv)) {
519 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
520 wmb();
521 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
522 iwl_release_nic_access(priv);
523 }
524 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
525}
526
527static inline void
528iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
529 u32 len, u32 *values)
530{
531 unsigned long reg_flags;
532
533 spin_lock_irqsave(&priv->reg_lock, reg_flags);
534 if (!iwl_grab_nic_access(priv)) {
535 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
536 wmb();
537 for (; 0 < len; len -= sizeof(u32), values++)
538 _iwl_legacy_write_direct32(priv,
539 HBUS_TARG_MEM_WDAT, *values);
540
541 iwl_release_nic_access(priv);
542 }
543 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
544}
545#endif
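
As a hedged usage sketch of the I/O helpers above (illustrative only, not part of this patch): a caller typically uses the PRPH wrappers to set or clear individual bits in a periphery register. The register and bit names below come from iwl-prph.h later in this patch, and the pairing is shown purely as an example of the API, not as a claim about any specific init path.

static void example_toggle_apmg_dma_clk(struct iwl_priv *priv, bool on)
{
	/* request or release the DMA clock in the APMG power-management block */
	if (on)
		iwl_legacy_set_bits_prph(priv, APMG_CLK_EN_REG,
					 APMG_CLK_VAL_DMA_CLK_RQT);
	else
		iwl_legacy_clear_bits_prph(priv, APMG_CLK_EN_REG,
					   APMG_CLK_VAL_DMA_CLK_RQT);
}

Both wrappers take priv->reg_lock and grab NIC access internally, so a single call needs no extra locking from the caller.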
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
new file mode 100644
index 000000000000..15eb8b707157
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.c
@@ -0,0 +1,188 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44
45/* default: 0 = IWL_LED_DEFAULT, i.e. use the device's default LED mode */
46static int led_mode;
47module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "0=system default, "
49 "1=On(RF On)/Off(RF Off), 2=blinking");
50
51static const struct ieee80211_tpt_blink iwl_blink[] = {
52 { .throughput = 0 * 1024 - 1, .blink_time = 334 },
53 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
54 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
55 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
56 { .throughput = 20 * 1024 - 1, .blink_time = 170 },
57 { .throughput = 50 * 1024 - 1, .blink_time = 150 },
58 { .throughput = 70 * 1024 - 1, .blink_time = 130 },
59 { .throughput = 100 * 1024 - 1, .blink_time = 110 },
60 { .throughput = 200 * 1024 - 1, .blink_time = 80 },
61 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
62};
63
64/*
65 * Adjust LED blink rate to compensate for the MAC clock deviation of each HW.
66 * LED blink rate analysis showed an average deviation of 0% on 3945 and
67 * 5% on 4965 HW.
68 * The LED on/off time is compensated per HW according to its deviation
69 * to achieve the desired LED frequency.
70 * The calculation is: (100 - averageDeviation) / 100 * blinkTime
71 * For code efficiency the calculation is done as:
72 * compensation = (100 - averageDeviation) * 64 / 100
73 * NewBlinkTime = (compensation * BlinkTime) / 64
74 */
75static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
76 u8 time, u16 compensation)
77{
78 if (!compensation) {
79 IWL_ERR(priv, "undefined blink compensation: "
80 "use pre-defined blinking time\n");
81 return time;
82 }
83
84 return (u8)((time * compensation) >> 6);
85}
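
As a worked illustration of the formula above (the numbers are hypothetical, not taken from any particular cfg's led_compensation value): with an average clock deviation of 5%, the precomputed compensation is (100 - 5) * 64 / 100 = 60, so a requested 260 ms blink time is scaled by the helper to (260 * 60) >> 6 = 243 ms.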
86
87/* Set led pattern command */
88static int iwl_legacy_led_cmd(struct iwl_priv *priv,
89 unsigned long on,
90 unsigned long off)
91{
92 struct iwl_led_cmd led_cmd = {
93 .id = IWL_LED_LINK,
94 .interval = IWL_DEF_LED_INTRVL
95 };
96 int ret;
97
98 if (!test_bit(STATUS_READY, &priv->status))
99 return -EBUSY;
100
101 if (priv->blink_on == on && priv->blink_off == off)
102 return 0;
103
104 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
105 priv->cfg->base_params->led_compensation);
106 led_cmd.on = iwl_legacy_blink_compensation(priv, on,
107 priv->cfg->base_params->led_compensation);
108 led_cmd.off = iwl_legacy_blink_compensation(priv, off,
109 priv->cfg->base_params->led_compensation);
110
111 ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
112 if (!ret) {
113 priv->blink_on = on;
114 priv->blink_off = off;
115 }
116 return ret;
117}
118
119static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
120 enum led_brightness brightness)
121{
122 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
123 unsigned long on = 0;
124
125 if (brightness > 0)
126 on = IWL_LED_SOLID;
127
128 iwl_legacy_led_cmd(priv, on, 0);
129}
130
131static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
132 unsigned long *delay_on,
133 unsigned long *delay_off)
134{
135 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
136
137 return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
138}
139
140void iwl_legacy_leds_init(struct iwl_priv *priv)
141{
142 int mode = led_mode;
143 int ret;
144
145 if (mode == IWL_LED_DEFAULT)
146 mode = priv->cfg->led_mode;
147
148 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
149 wiphy_name(priv->hw->wiphy));
150 priv->led.brightness_set = iwl_legacy_led_brightness_set;
151 priv->led.blink_set = iwl_legacy_led_blink_set;
152 priv->led.max_brightness = 1;
153
154 switch (mode) {
155 case IWL_LED_DEFAULT:
156 WARN_ON(1);
157 break;
158 case IWL_LED_BLINK:
159 priv->led.default_trigger =
160 ieee80211_create_tpt_led_trigger(priv->hw,
161 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
162 iwl_blink, ARRAY_SIZE(iwl_blink));
163 break;
164 case IWL_LED_RF_STATE:
165 priv->led.default_trigger =
166 ieee80211_get_radio_led_name(priv->hw);
167 break;
168 }
169
170 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
171 if (ret) {
172 kfree(priv->led.name);
173 return;
174 }
175
176 priv->led_registered = true;
177}
178EXPORT_SYMBOL(iwl_legacy_leds_init);
179
180void iwl_legacy_leds_exit(struct iwl_priv *priv)
181{
182 if (!priv->led_registered)
183 return;
184
185 led_classdev_unregister(&priv->led);
186 kfree(priv->led.name);
187}
188EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
new file mode 100644
index 000000000000..f0791f70f79d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.h
@@ -0,0 +1,56 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_leds_h__
28#define __iwl_legacy_leds_h__
29
30
31struct iwl_priv;
32
33#define IWL_LED_SOLID 11
34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
35
36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1)
38
39/*
40 * LED mode
41 * IWL_LED_DEFAULT: use device default
42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
43 * LED ON = RF ON
44 * LED OFF = RF OFF
45 * IWL_LED_BLINK: adjust led blink rate based on blink table
46 */
47enum iwl_led_mode {
48 IWL_LED_DEFAULT,
49 IWL_LED_RF_STATE,
50 IWL_LED_BLINK,
51};
52
53void iwl_legacy_leds_init(struct iwl_priv *priv);
54void iwl_legacy_leds_exit(struct iwl_priv *priv);
55
56#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
new file mode 100644
index 000000000000..f66a1b2c0397
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
@@ -0,0 +1,456 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_rs_h__
28#define __iwl_legacy_rs_h__
29
30struct iwl_rate_info {
31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
32 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
33 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
34 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
35 u8 prev_ieee; /* previous rate in IEEE speeds */
36 u8 next_ieee; /* next rate in IEEE speeds */
37 u8 prev_rs; /* previous rate used in rs algo */
38 u8 next_rs; /* next rate used in rs algo */
39 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
40 u8 next_rs_tgg; /* next rate used in TGG rs algo */
41};
42
43struct iwl3945_rate_info {
44 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
45 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
46 u8 prev_ieee; /* previous rate in IEEE speeds */
47 u8 next_ieee; /* next rate in IEEE speeds */
48 u8 prev_rs; /* previous rate used in rs algo */
49 u8 next_rs; /* next rate used in rs algo */
50 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
51 u8 next_rs_tgg; /* next rate used in TGG rs algo */
52 u8 table_rs_index; /* index in rate scale table cmd */
53 u8 prev_table_rs; /* prev in rate table cmd */
54};
55
56
57/*
58 * These serve as indexes into
59 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
60 */
61enum {
62 IWL_RATE_1M_INDEX = 0,
63 IWL_RATE_2M_INDEX,
64 IWL_RATE_5M_INDEX,
65 IWL_RATE_11M_INDEX,
66 IWL_RATE_6M_INDEX,
67 IWL_RATE_9M_INDEX,
68 IWL_RATE_12M_INDEX,
69 IWL_RATE_18M_INDEX,
70 IWL_RATE_24M_INDEX,
71 IWL_RATE_36M_INDEX,
72 IWL_RATE_48M_INDEX,
73 IWL_RATE_54M_INDEX,
74 IWL_RATE_60M_INDEX,
75 IWL_RATE_COUNT,
76 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
77 IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
78 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
79 IWL_RATE_INVALID = IWL_RATE_COUNT,
80};
81
82enum {
83 IWL_RATE_6M_INDEX_TABLE = 0,
84 IWL_RATE_9M_INDEX_TABLE,
85 IWL_RATE_12M_INDEX_TABLE,
86 IWL_RATE_18M_INDEX_TABLE,
87 IWL_RATE_24M_INDEX_TABLE,
88 IWL_RATE_36M_INDEX_TABLE,
89 IWL_RATE_48M_INDEX_TABLE,
90 IWL_RATE_54M_INDEX_TABLE,
91 IWL_RATE_1M_INDEX_TABLE,
92 IWL_RATE_2M_INDEX_TABLE,
93 IWL_RATE_5M_INDEX_TABLE,
94 IWL_RATE_11M_INDEX_TABLE,
95 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
96};
97
98enum {
99 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
100 IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
101 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
102 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
103 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
104};
105
106/* #define vs. enum to keep from defaulting to 'large integer' */
107#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
108#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
109#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
110#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
111#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
112#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
113#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
114#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
115#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
116#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
117#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
118#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
119#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
120
121/* uCode API values for legacy bit rates, both OFDM and CCK */
122enum {
123 IWL_RATE_6M_PLCP = 13,
124 IWL_RATE_9M_PLCP = 15,
125 IWL_RATE_12M_PLCP = 5,
126 IWL_RATE_18M_PLCP = 7,
127 IWL_RATE_24M_PLCP = 9,
128 IWL_RATE_36M_PLCP = 11,
129 IWL_RATE_48M_PLCP = 1,
130 IWL_RATE_54M_PLCP = 3,
131 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
132 IWL_RATE_1M_PLCP = 10,
133 IWL_RATE_2M_PLCP = 20,
134 IWL_RATE_5M_PLCP = 55,
135 IWL_RATE_11M_PLCP = 110,
136 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
137};
138
139/* uCode API values for OFDM high-throughput (HT) bit rates */
140enum {
141 IWL_RATE_SISO_6M_PLCP = 0,
142 IWL_RATE_SISO_12M_PLCP = 1,
143 IWL_RATE_SISO_18M_PLCP = 2,
144 IWL_RATE_SISO_24M_PLCP = 3,
145 IWL_RATE_SISO_36M_PLCP = 4,
146 IWL_RATE_SISO_48M_PLCP = 5,
147 IWL_RATE_SISO_54M_PLCP = 6,
148 IWL_RATE_SISO_60M_PLCP = 7,
149 IWL_RATE_MIMO2_6M_PLCP = 0x8,
150 IWL_RATE_MIMO2_12M_PLCP = 0x9,
151 IWL_RATE_MIMO2_18M_PLCP = 0xa,
152 IWL_RATE_MIMO2_24M_PLCP = 0xb,
153 IWL_RATE_MIMO2_36M_PLCP = 0xc,
154 IWL_RATE_MIMO2_48M_PLCP = 0xd,
155 IWL_RATE_MIMO2_54M_PLCP = 0xe,
156 IWL_RATE_MIMO2_60M_PLCP = 0xf,
157 IWL_RATE_SISO_INVM_PLCP,
158 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
159};
160
161/* MAC header values for bit rates */
162enum {
163 IWL_RATE_6M_IEEE = 12,
164 IWL_RATE_9M_IEEE = 18,
165 IWL_RATE_12M_IEEE = 24,
166 IWL_RATE_18M_IEEE = 36,
167 IWL_RATE_24M_IEEE = 48,
168 IWL_RATE_36M_IEEE = 72,
169 IWL_RATE_48M_IEEE = 96,
170 IWL_RATE_54M_IEEE = 108,
171 IWL_RATE_60M_IEEE = 120,
172 IWL_RATE_1M_IEEE = 2,
173 IWL_RATE_2M_IEEE = 4,
174 IWL_RATE_5M_IEEE = 11,
175 IWL_RATE_11M_IEEE = 22,
176};
177
178#define IWL_CCK_BASIC_RATES_MASK \
179 (IWL_RATE_1M_MASK | \
180 IWL_RATE_2M_MASK)
181
182#define IWL_CCK_RATES_MASK \
183 (IWL_CCK_BASIC_RATES_MASK | \
184 IWL_RATE_5M_MASK | \
185 IWL_RATE_11M_MASK)
186
187#define IWL_OFDM_BASIC_RATES_MASK \
188 (IWL_RATE_6M_MASK | \
189 IWL_RATE_12M_MASK | \
190 IWL_RATE_24M_MASK)
191
192#define IWL_OFDM_RATES_MASK \
193 (IWL_OFDM_BASIC_RATES_MASK | \
194 IWL_RATE_9M_MASK | \
195 IWL_RATE_18M_MASK | \
196 IWL_RATE_36M_MASK | \
197 IWL_RATE_48M_MASK | \
198 IWL_RATE_54M_MASK)
199
200#define IWL_BASIC_RATES_MASK \
201 (IWL_OFDM_BASIC_RATES_MASK | \
202 IWL_CCK_BASIC_RATES_MASK)
203
204#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
205#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
206
207#define IWL_INVALID_VALUE -1
208
209#define IWL_MIN_RSSI_VAL -100
210#define IWL_MAX_RSSI_VAL 0
211
212/* These values specify how many Tx frame attempts before
213 * searching for a new modulation mode */
214#define IWL_LEGACY_FAILURE_LIMIT 160
215#define IWL_LEGACY_SUCCESS_LIMIT 480
216#define IWL_LEGACY_TABLE_COUNT 160
217
218#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
219#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
220#define IWL_NONE_LEGACY_TABLE_COUNT 1500
221
222/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
223#define IWL_RS_GOOD_RATIO 12800 /* 100% */
224#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
225#define IWL_RATE_HIGH_TH 10880 /* 85% */
226#define IWL_RATE_INCREASE_TH 6400 /* 50% */
227#define IWL_RATE_DECREASE_TH 1920 /* 15% */
228
229/* possible actions when in legacy mode */
230#define IWL_LEGACY_SWITCH_ANTENNA1 0
231#define IWL_LEGACY_SWITCH_ANTENNA2 1
232#define IWL_LEGACY_SWITCH_SISO 2
233#define IWL_LEGACY_SWITCH_MIMO2_AB 3
234#define IWL_LEGACY_SWITCH_MIMO2_AC 4
235#define IWL_LEGACY_SWITCH_MIMO2_BC 5
236
237/* possible actions when in siso mode */
238#define IWL_SISO_SWITCH_ANTENNA1 0
239#define IWL_SISO_SWITCH_ANTENNA2 1
240#define IWL_SISO_SWITCH_MIMO2_AB 2
241#define IWL_SISO_SWITCH_MIMO2_AC 3
242#define IWL_SISO_SWITCH_MIMO2_BC 4
243#define IWL_SISO_SWITCH_GI 5
244
245/* possible actions when in mimo mode */
246#define IWL_MIMO2_SWITCH_ANTENNA1 0
247#define IWL_MIMO2_SWITCH_ANTENNA2 1
248#define IWL_MIMO2_SWITCH_SISO_A 2
249#define IWL_MIMO2_SWITCH_SISO_B 3
250#define IWL_MIMO2_SWITCH_SISO_C 4
251#define IWL_MIMO2_SWITCH_GI 5
252
253#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
254
255#define IWL_ACTION_LIMIT 3 /* # possible actions */
256
257#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
258
259/* load per tid defines for A-MPDU activation */
260#define IWL_AGG_TPT_THREHOLD 0
261#define IWL_AGG_LOAD_THRESHOLD 10
262#define IWL_AGG_ALL_TID 0xff
263#define TID_QUEUE_CELL_SPACING 50 /*mS */
264#define TID_QUEUE_MAX_SIZE 20
265#define TID_ROUND_VALUE 5 /* mS */
266#define TID_MAX_LOAD_COUNT 8
267
268#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
269#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
270
271extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
272
273enum iwl_table_type {
274 LQ_NONE,
275 LQ_G, /* legacy types */
276 LQ_A,
277 LQ_SISO, /* high-throughput types */
278 LQ_MIMO2,
279 LQ_MAX,
280};
281
282#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
283#define is_siso(tbl) ((tbl) == LQ_SISO)
284#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
285#define is_mimo(tbl) (is_mimo2(tbl))
286#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
287#define is_a_band(tbl) ((tbl) == LQ_A)
288#define is_g_and(tbl) ((tbl) == LQ_G)
289
290#define ANT_NONE 0x0
291#define ANT_A BIT(0)
292#define ANT_B BIT(1)
293#define ANT_AB (ANT_A | ANT_B)
294#define ANT_C BIT(2)
295#define ANT_AC (ANT_A | ANT_C)
296#define ANT_BC (ANT_B | ANT_C)
297#define ANT_ABC (ANT_AB | ANT_C)
298
299#define IWL_MAX_MCS_DISPLAY_SIZE 12
300
301struct iwl_rate_mcs_info {
302 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
303 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
304};
305
306/**
307 * struct iwl_rate_scale_data -- tx success history for one rate
308 */
309struct iwl_rate_scale_data {
310 u64 data; /* bitmap of successful frames */
311 s32 success_counter; /* number of frames successful */
312 s32 success_ratio; /* per-cent * 128 */
313 s32 counter; /* number of frames attempted */
314 s32 average_tpt; /* success ratio * expected throughput */
315 unsigned long stamp;
316};
317
318/**
319 * struct iwl_scale_tbl_info -- tx params and success history for all rates
320 *
321 * There are two of these in struct iwl_lq_sta,
322 * one for "active", and one for "search".
323 */
324struct iwl_scale_tbl_info {
325 enum iwl_table_type lq_type;
326 u8 ant_type;
327 u8 is_SGI; /* 1 = short guard interval */
328 u8 is_ht40; /* 1 = 40 MHz channel width */
329 u8 is_dup; /* 1 = duplicated data streams */
330 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
331 u8 max_search; /* maximum number of tables we can search */
332 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
333 u32 current_rate; /* rate_n_flags, uCode API format */
334 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
335};
336
337struct iwl_traffic_load {
338 unsigned long time_stamp; /* age of the oldest statistics */
339 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
340 * slice */
341 u32 total; /* total num of packets during the
342 * last TID_MAX_TIME_DIFF */
343 u8 queue_count; /* number of queues that have
344 * been used since the last cleanup */
345 u8 head; /* start of the circular buffer */
346};
347
348/**
349 * struct iwl_lq_sta -- driver's rate scaling private structure
350 *
351 * Pointer to this gets passed back and forth between driver and mac80211.
352 */
353struct iwl_lq_sta {
354 u8 active_tbl; /* index of active table, range 0-1 */
355 u8 enable_counter; /* indicates HT mode */
356 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
357 u8 search_better_tbl; /* 1: currently trying alternate mode */
358 s32 last_tpt;
359
360 /* The following determine when to search for a new mode */
361 u32 table_count_limit;
362 u32 max_failure_limit; /* # failed frames before new search */
363 u32 max_success_limit; /* # successful frames before new search */
364 u32 table_count;
365 u32 total_failed; /* total failed frames, any/all rates */
366 u32 total_success; /* total successful frames, any/all rates */
367 u64 flush_timer; /* time staying in mode before new search */
368
369 u8 action_counter; /* # mode-switch actions tried */
370 u8 is_green;
371 u8 is_dup;
372 enum ieee80211_band band;
373
374 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
375 u32 supp_rates;
376 u16 active_legacy_rate;
377 u16 active_siso_rate;
378 u16 active_mimo2_rate;
379 s8 max_rate_idx; /* Max rate set by user */
380 u8 missed_rate_counter;
381
382 struct iwl_link_quality_cmd lq;
383 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
384 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
385 u8 tx_agg_tid_en;
386#ifdef CONFIG_MAC80211_DEBUGFS
387 struct dentry *rs_sta_dbgfs_scale_table_file;
388 struct dentry *rs_sta_dbgfs_stats_table_file;
389 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
390 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
391 u32 dbg_fixed_rate;
392#endif
393 struct iwl_priv *drv;
394
395 /* used to be in sta_info */
396 int last_txrate_idx;
397 /* last tx rate_n_flags */
398 u32 last_rate_n_flags;
399 /* packets destined for this STA are aggregated */
400 u8 is_agg;
401};
402
403static inline u8 iwl4965_num_of_ant(u8 mask)
404{
405 return !!((mask) & ANT_A) +
406 !!((mask) & ANT_B) +
407 !!((mask) & ANT_C);
408}
409
410static inline u8 iwl4965_first_antenna(u8 mask)
411{
412 if (mask & ANT_A)
413 return ANT_A;
414 if (mask & ANT_B)
415 return ANT_B;
416 return ANT_C;
417}
418
419
420/**
421 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
422 *
423 * The specific throughput table used is based on the type of network
424 * the station is associated with, including A, B, G, and G w/ TGG protection
425 */
426extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
427
428/* Initialize station's rate scaling information after adding station */
429extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
430 struct ieee80211_sta *sta, u8 sta_id);
431extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
432 struct ieee80211_sta *sta, u8 sta_id);
433
434/**
435 * iwl_rate_control_register - Register the rate control algorithm callbacks
436 *
437 * Since the rate control algorithm is hardware specific, there is no need
438 * or reason to place it as a stand alone module. The driver can call
439 * iwl_rate_control_register in order to register the rate control callbacks
440 * with the mac80211 subsystem. This should be performed prior to calling
441 * ieee80211_register_hw
442 *
443 */
444extern int iwl4965_rate_control_register(void);
445extern int iwl3945_rate_control_register(void);
446
447/**
448 * iwl_rate_control_unregister - Unregister the rate control callbacks
449 *
450 * This should be called after calling ieee80211_unregister_hw, but before
451 * the driver is unloaded.
452 */
453extern void iwl4965_rate_control_unregister(void);
454extern void iwl3945_rate_control_unregister(void);
455
456#endif /* __iwl_legacy_rs_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
new file mode 100644
index 000000000000..903ef0d6d6cb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.c
@@ -0,0 +1,165 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-power.h"
44
45/*
46 * Setting power level allows the card to go to sleep when not busy.
47 *
48 * We calculate a sleep command based on the required latency, which
49 * we get from mac80211. In order to handle thermal throttling, we can
50 * also use pre-defined power levels.
51 */
52
53/*
54 * This defines the old power levels. They are still used by default
55 * (level 1) and for thermal throttle (levels 3 through 5)
56 */
57
58struct iwl_power_vec_entry {
59 struct iwl_powertable_cmd cmd;
60 u8 no_dtim; /* number of DTIM periods to skip */
61};
62
63static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
64 struct iwl_powertable_cmd *cmd)
65{
66 memset(cmd, 0, sizeof(*cmd));
67
68 if (priv->power_data.pci_pm)
69 cmd->flags |= IWL_POWER_PCI_PM_MSK;
70
71 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
72}
73
74static int
75iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
76{
77 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
78 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
79 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
80 le32_to_cpu(cmd->tx_data_timeout));
81 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
82 le32_to_cpu(cmd->rx_data_timeout));
83 IWL_DEBUG_POWER(priv,
84 "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
85 le32_to_cpu(cmd->sleep_interval[0]),
86 le32_to_cpu(cmd->sleep_interval[1]),
87 le32_to_cpu(cmd->sleep_interval[2]),
88 le32_to_cpu(cmd->sleep_interval[3]),
89 le32_to_cpu(cmd->sleep_interval[4]));
90
91 return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
92 sizeof(struct iwl_powertable_cmd), cmd);
93}
94
95int
96iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
97 bool force)
98{
99 int ret;
100 bool update_chains;
101
102 lockdep_assert_held(&priv->mutex);
103
104 /* Don't update the RX chain when chain noise calibration is running */
105 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
106 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
107
108 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
109 return 0;
110
111 if (!iwl_legacy_is_ready_rf(priv))
112 return -EIO;
113
114 /* scan complete uses sleep_cmd_next, so it needs to be kept updated */
115 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
116 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
117 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
118 return 0;
119 }
120
121 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
122 set_bit(STATUS_POWER_PMI, &priv->status);
123
124 ret = iwl_legacy_set_power(priv, cmd);
125 if (!ret) {
126 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
127 clear_bit(STATUS_POWER_PMI, &priv->status);
128
129 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
130 priv->cfg->ops->lib->update_chain_flags(priv);
131 else if (priv->cfg->ops->lib->update_chain_flags)
132 IWL_DEBUG_POWER(priv,
133 "Cannot update the power, chain noise "
134 "calibration running: %d\n",
135 priv->chain_noise_data.state);
136
137 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
138 } else
139 IWL_ERR(priv, "set power fail, ret = %d\n", ret);
140
141 return ret;
142}
143
144int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
145{
146 struct iwl_powertable_cmd cmd;
147
148 iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
149 return iwl_legacy_power_set_mode(priv, &cmd, force);
150}
151EXPORT_SYMBOL(iwl_legacy_power_update_mode);
152
153/* initialize to default */
154void iwl_legacy_power_initialize(struct iwl_priv *priv)
155{
156 u16 lctl = iwl_legacy_pcie_link_ctl(priv);
157
158 priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
159
160 priv->power_data.debug_sleep_level_override = -1;
161
162 memset(&priv->power_data.sleep_cmd, 0,
163 sizeof(priv->power_data.sleep_cmd));
164}
165EXPORT_SYMBOL(iwl_legacy_power_initialize);
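
As a hedged usage sketch (illustrative only, not part of this patch): iwl_legacy_power_set_mode() asserts that priv->mutex is held, so a caller refreshing the sleep command after a configuration change would typically look like the function below, passing force=true to re-send the command even when the parameters appear unchanged.

static int example_refresh_power_mode(struct iwl_priv *priv)
{
	int ret;

	mutex_lock(&priv->mutex);
	ret = iwl_legacy_power_update_mode(priv, true);
	mutex_unlock(&priv->mutex);
	return ret;
}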
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
new file mode 100644
index 000000000000..d30b36acdc4a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.h
@@ -0,0 +1,55 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_legacy_power_setting_h__
29#define __iwl_legacy_power_setting_h__
30
31#include "iwl-commands.h"
32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
45 int debug_sleep_level_override;
46 bool pci_pm;
47};
48
49int
50iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
51 bool force);
52int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
53void iwl_legacy_power_initialize(struct iwl_priv *priv);
54
55#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h
new file mode 100644
index 000000000000..30a493003ab0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-prph.h
@@ -0,0 +1,523 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_prph_h__
64#define __iwl_legacy_prph_h__
65
66/*
67 * Registers in this file are internal, not PCI bus memory mapped.
68 * Driver accesses these via HBUS_TARG_PRPH_* registers.
69 */
70#define PRPH_BASE (0x00000)
71#define PRPH_END (0xFFFFF)
72
73/* APMG (power management) constants */
74#define APMG_BASE (PRPH_BASE + 0x3000)
75#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000)
76#define APMG_CLK_EN_REG (APMG_BASE + 0x0004)
77#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008)
78#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c)
79#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010)
80#define APMG_RFKILL_REG (APMG_BASE + 0x0014)
81#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c)
82#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020)
83#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
84#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
85
86#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
87#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
88#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
89
90#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
98
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
100
101/**
102 * BSM (Bootstrap State Machine)
103 *
104 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
105 * in special SRAM that does not power down when the embedded control
106 * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
107 *
108 * When powering back up after sleeps (or during initial uCode load), the BSM
109 * internally loads the short bootstrap program from the special SRAM into the
110 * embedded processor's instruction SRAM, and starts the processor so it runs
111 * the bootstrap program.
112 *
113 * This bootstrap program loads (via PCI busmaster DMA) instructions and data
114 * images for a uCode program from host DRAM locations. The host driver
115 * indicates DRAM locations and sizes for instruction and data images via the
116 * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
117 * the new program starts automatically.
118 *
119 * The uCode used for open-source drivers includes two programs:
120 *
121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification
123 * (struct iwl_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver.
127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct iwl_alive_resp) that it
130 * is ready to be used.
131 *
132 * When initializing the NIC, the host driver does the following procedure:
133 *
134 * 1) Load bootstrap program (instructions only, no data image for bootstrap)
135 * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
136 *
137 * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
138 * images in host DRAM.
139 *
140 * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
141 * BSM_WR_MEM_SRC_REG = 0
142 * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
143 * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image
144 *
145 * 4) Load bootstrap into instruction SRAM:
146 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
147 *
148 * 5) Wait for load completion:
149 * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
150 *
151 * 6) Enable future boot loads whenever NIC's power management triggers it:
152 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
153 *
154 * 7) Start the NIC by removing all reset bits:
155 * CSR_RESET = 0
156 *
157 * The bootstrap uCode (already in instruction SRAM) loads initialization
158 * uCode. Initialization uCode performs data initialization, sends
159 * "initialize alive" notification to host, and waits for a signal from
160 * host to load runtime code.
161 *
162 * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
163 * images in host DRAM. The last register loaded must be the instruction
164 * byte count register ("1" in MSbit tells initialization uCode to load
165 * the runtime uCode):
166 * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
167 *
168 * 5) Wait for "alive" notification, then issue normal runtime commands.
169 *
170 * Data caching during power-downs:
171 *
172 * Just before the embedded controller powers down (e.g for automatic
173 * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
174 * a current snapshot of the embedded processor's data SRAM into host DRAM.
175 * This caches the data while the embedded processor's memory is powered down.
176 * Location and size are controlled by BSM_DRAM_DATA_* registers.
177 *
178 * NOTE: Instruction SRAM does not need to be saved, since that doesn't
179 * change during operation; the original image (from uCode distribution
180 * file) can be used for reload.
181 *
182 * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
183 * at the BSM_DRAM_* registers, which now point to the runtime instruction
184 * image and the cached (modified) runtime data (*not* the initialization
185 * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
186 * uCode from where it left off before the power-down.
187 *
188 * NOTE: Initialization uCode does *not* run as part of the save/restore
189 * procedure.
190 *
191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory).
195 *
196 * Note that, during normal operation, the host DRAM that held the initial
197 * startup data for the runtime code is now being used as a backup data cache
198 * for modified data! If you need to completely re-initialize the NIC, make
199 * sure that you use the runtime data image from the uCode distribution file,
200 * not the modified/saved runtime data. You may want to store a separate
201 * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
202 */
203
204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208
209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800)
212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218
219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
221 * NOTE: 3945 pointers use bits 31:0 of DRAM address.
222 * 4965 pointers use bits 35:4 of DRAM address.
223 */
224#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
225#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
226#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
227#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
228
229/*
230 * BSM special memory, stays powered on during power-save sleeps.
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */
235
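The bootstrap-load procedure in the comment above boils down to a short register sequence. The sketch below is illustrative only, not part of this patch: it uses the iwl_legacy_read_prph()/iwl_legacy_write_prph() helpers from iwl-io.h, and "dst_addr"/"inst_len" stand in for the instruction SRAM destination and bootstrap image length that a real load routine would supply; copying the image into BSM SRAM and full error reporting are omitted.

static int example_bsm_start_bootstrap(struct iwl_priv *priv,
				       u32 dst_addr, u32 inst_len)
{
	int i;

	/* step 3: source (start of BSM SRAM), destination and dword count */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, dst_addr);
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG,
			      inst_len / sizeof(u32));

	/* step 4: start copying bootstrap into instruction SRAM */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
			      BSM_WR_CTRL_REG_BIT_START);

	/* step 5: poll until the START bit clears (load complete) */
	for (i = 0; i < 100; i++) {
		if (!(iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG) &
		      BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* step 6: re-arm BSM so it reloads bootstrap after future power-ups */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
			      BSM_WR_CTRL_REG_BIT_START_EN);
	return 0;
}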
236
237/* 3945 Tx scheduler registers */
238#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
239#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000)
240#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004)
241#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010)
242#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014)
243#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020)
244#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
245#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
246
247/**
248 * Tx Scheduler
249 *
250 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
251 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
252 * host DRAM. It steers each frame's Tx command (which contains the frame
253 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
254 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
255 * but one DMA channel may take input from several queues.
256 *
257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c):
259 *
260 * 0 -- EDCA BK (background) frames, lowest priority
261 * 1 -- EDCA BE (best effort) frames, normal priority
262 * 2 -- EDCA VI (video) frames, higher priority
263 * 3 -- EDCA VO (voice) and management frames, highest priority
264 * 4 -- Commands (e.g. RXON, etc.)
265 * 5 -- unused (HCCA)
266 * 6 -- unused (HCCA)
267 * 7 -- not used by driver (device-internal only)
268 *
269 *
270 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
271 * In addition, driver can map the remaining queues to Tx DMA/FIFO
272 * channels 0-3 to support 11n aggregation via EDCA DMA channels.
273 *
274 * The driver sets up each queue to work in one of two modes:
275 *
276 * 1) Scheduler-Ack, in which the scheduler automatically supports a
277 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
278 * contains TFDs for a unique combination of Recipient Address (RA)
279 * and Traffic Identifier (TID), that is, traffic of a given
280 * Quality-Of-Service (QOS) priority, destined for a single station.
281 *
282 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
283 * each frame within the BA window, including whether it's been transmitted,
284 * and whether it's been acknowledged by the receiving station. The device
285 * automatically processes block-acks received from the receiving STA,
286 * and reschedules un-acked frames to be retransmitted (successful
287 * Tx completion may end up being out-of-order).
288 *
289 * The driver must maintain the queue's Byte Count table in host DRAM
290 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
291 * This mode does not support fragmentation.
292 *
293 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
294 * The device may automatically retry Tx, but will retry only one frame
295 * at a time, until receiving ACK from receiving station, or reaching
296 * retry limit and giving up.
297 *
298 * The command queue (#4/#9) must use this mode!
299 * This mode does not require use of the Byte Count table in host DRAM.
300 *
301 * Driver controls scheduler operation via 3 means:
302 * 1) Scheduler registers
303 * 2) Shared scheduler database in internal 4965 SRAM
304 * 3) Shared data in host DRAM
305 *
306 * Initialization:
307 *
308 * When loading, driver should allocate memory for:
309 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
310 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
311 * (1024 bytes for each queue).
312 *
313 * After receiving "Alive" response from uCode, driver must initialize
314 * the scheduler (especially for queue #4/#9, the command queue, otherwise
315 * the driver can't issue commands!):
316 */
317
318/**
319 * Max Tx window size is the max number of contiguous TFDs that the scheduler
320 * can keep track of at one time when creating block-ack chains of frames.
321 * Note that "64" matches the number of ack bits in a block-ack packet.
322 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
323 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
324 */
325#define SCD_WIN_SIZE 64
326#define SCD_FRAME_LIMIT 64
327
328/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
329#define IWL49_SCD_START_OFFSET 0xa02c00
330
331/*
332 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
333 * Value is valid only after "Alive" response from uCode.
334 */
335#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
336
337/*
338 * Driver may need to update queue-empty bits after changing queue's
339 * write and read pointers (indexes) during (re-)initialization (i.e. when
340 * scheduler is not tracking what's happening).
341 * Bit fields:
342 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
343 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
344 * NOTE: This register is not used by Linux driver.
345 */
346#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
347
348/*
349 * Physical base address of array of byte count (BC) circular buffers (CBs).
350 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
351 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
352 * Others are spaced by 1024 bytes.
353 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
354 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
355 * Bit fields:
356 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
357 */
358#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
359
360/*
361 * Enables any/all Tx DMA/FIFO channels.
362 * Scheduler generates requests for only the active channels.
363 * Set this to 0xff to enable all 8 channels (normal usage).
364 * Bit fields:
365 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
366 */
367#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
368/*
369 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
370 * Initialized and updated by driver as new TFDs are added to queue.
371 * NOTE: If using Block Ack, index must correspond to frame's
372 * Start Sequence Number; index = (SSN & 0xff)
373 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
374 */
375#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
376
377/*
378 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
379 * For FIFO mode, index indicates next frame to transmit.
380 * For Scheduler-ACK mode, index indicates first frame in Tx window.
381 * Initialized by driver, updated by scheduler.
382 */
383#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
384
385/*
386 * Select which queues work in chain mode (1) vs. not (0).
387 * Use chain mode to build chains of aggregated frames.
388 * Bit fields:
389 * 31-16: Reserved
390 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
391 * NOTE: If driver sets up a queue for chain mode, it should also set up
392 * Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
393 */
394#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
395
396/*
397 * Select which queues interrupt driver when scheduler increments
398 * a queue's read pointer (index).
399 * Bit fields:
400 * 31-16: Reserved
401 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
402 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
403 * from Rx queue to read Tx command responses and update Tx queues.
404 */
405#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
406
407/*
408 * Queue search status registers. One for each queue.
409 * Sets up queue mode and assigns queue to Tx DMA channel.
410 * Bit fields:
411 * 19-10: Write mask/enable bits for bits 0-9
412 * 9: Driver should init to "0"
413 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
414 * Driver should init to "1" for aggregation mode, or "0" otherwise.
415 * 7-6: Driver should init to "0"
416 * 5: Window Size Left; indicates whether scheduler can request
417 * another TFD, based on window size, etc. Driver should init
418 * this bit to "1" for aggregation mode, or "0" for non-agg.
419 * 4-1: Tx FIFO to use (range 0-7).
420 * 0: Queue is active (1), not active (0).
421 * Other bits should be written as "0"
422 *
423 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
424 * via SCD_QUEUECHAIN_SEL.
425 */
426#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
427 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
428
429/* Bit field positions */
430#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
431#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
432#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
433#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
434
435/* Write masks */
436#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
437#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
438
439/**
440 * 4965 internal SRAM structures for scheduler, shared with driver ...
441 *
442 * Driver should clear and initialize the following areas after receiving
443 * "Alive" response from 4965 uCode, i.e. after initial
444 * uCode load, or after a uCode load done for error recovery:
445 *
446 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
447 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
448 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
449 *
450 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
451 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
452 * All OFFSET values must be added to this base address.
453 */
454
455/*
456 * Queue context. One 8-byte entry for each of 16 queues.
457 *
458 * Driver should clear this entire area (size 0x80) to 0 after receiving
459 * "Alive" notification from uCode. Additionally, driver should init
460 * each queue's entry as follows:
461 *
462 * LS Dword bit fields:
463 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
464 *
465 * MS Dword bit fields:
466 * 16-22: Frame limit. Driver should init to 10 (0xa).
467 *
468 * Driver should init all other bits to 0.
469 *
470 * Init must be done after driver receives "Alive" response from 4965 uCode,
471 * and when setting up queue for aggregation.
472 */
473#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
474#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
475 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
476
477#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
478#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
479#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
480#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
481
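As a hedged illustration of the queue-context init described above (not part of this patch): a routine along these lines writes the two context dwords for one queue through the target-memory helper in iwl-io.h. "scd_base" is assumed to be the scheduler SRAM base previously read from IWL49_SCD_SRAM_BASE_ADDR, and SCD_WIN_SIZE/SCD_FRAME_LIMIT are used here only as example values.

static void example_scd_init_queue_context(struct iwl_priv *priv,
					   u32 scd_base, int txq_id)
{
	/* LS dword: max Tx window size for Scheduler-ACK mode */
	iwl_legacy_write_targ_mem(priv, scd_base +
		IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	/* MS dword: frame limit for this queue */
	iwl_legacy_write_targ_mem(priv, scd_base +
		IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}
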
482/*
483 * Tx Status Bitmap
484 *
485 * Driver should clear this entire area (size 0x100) to 0 after receiving
486 * "Alive" notification from uCode. Area is used only by device itself;
487 * no other support (besides clearing) is required from driver.
488 */
489#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
490
491/*
492 * RAxTID to queue translation mapping.
493 *
494 * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
495 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
496 * one QOS priority level destined for one station (for this wireless link,
497 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
498 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
499 * mode, the device ignores the mapping value.
500 *
501 * Bit fields, for each 16-bit map:
502 * 15-9: Reserved, set to 0
503 * 8-4: Index into device's station table for recipient station
504 * 3-0: Traffic ID (tid), range 0-15
505 *
506 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
507 * "Alive" notification from uCode. To update a 16-bit map value, driver
508 * must read a dword-aligned value from device SRAM, replace the 16-bit map
509 * value of interest, and write the dword value back into device SRAM.
510 */
511#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
512
513/* Find translation table dword to read/write for given queue */
514#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
515 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
516
517#define IWL_SCD_TXFIFO_POS_TID (0)
518#define IWL_SCD_TXFIFO_POS_RA (4)
519#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
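/*
 * A minimal sketch (not part of this patch) of the dword read-modify-write
 * described above for one queue's RAxTID map entry; it assumes
 * iwl_legacy_read_targ_mem()/iwl_legacy_write_targ_mem() from iwl-io.h.
 */
static inline void iwl49_sketch_set_ra_tid_map(struct iwl_priv *priv,
					       u32 scd_base_addr, u16 txq_id,
					       u8 sta_id, u8 tid)
{
	u32 addr = scd_base_addr +
		   IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	u16 map = ((sta_id << IWL_SCD_TXFIFO_POS_RA) |
		   (tid << IWL_SCD_TXFIFO_POS_TID)) &
		  IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
	u32 val = iwl_legacy_read_targ_mem(priv, addr);

	/* two 16-bit maps share each dword; odd queues use the high half */
	if (txq_id & 0x1)
		val = (val & 0x0000FFFF) | ((u32)map << 16);
	else
		val = (val & 0xFFFF0000) | map;

	iwl_legacy_write_targ_mem(priv, addr, val);
}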
520
521/*********************** END TX SCHEDULER *************************************/
522
523#endif /* __iwl_legacy_prph_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
new file mode 100644
index 000000000000..654cf233a384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-rx.c
@@ -0,0 +1,302 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <net/mac80211.h>
33#include <asm/unaligned.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/
41/*
42 * Rx theory of operation
43 *
44 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
45 * each of which point to Receive Buffers to be filled by the NIC. These get
46 * used not only for Rx frames, but for any command response or notification
47 * from the NIC. The driver and NIC manage the Rx buffers by means
48 * of indexes into the circular buffer.
49 *
50 * Rx Queue Indexes
51 * The host/firmware share two index registers for managing the Rx buffers.
52 *
53 * The READ index maps to the first position that the firmware may be writing
54 * to -- the driver can read up to (but not including) this position and get
55 * good data.
56 * The READ index is managed by the firmware once the card is enabled.
57 *
58 * The WRITE index maps to the last position the driver has read from -- the
59 * position preceding WRITE is the last slot the firmware can place a packet.
60 *
61 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
62 * WRITE = READ.
63 *
64 * During initialization, the host sets up the READ queue position to the first
65 * INDEX position, and WRITE to the last (READ - 1 wrapped)
66 *
67 * When the firmware places a packet in a buffer, it will advance the READ index
68 * and fire the RX interrupt. The driver can then query the READ index and
69 * process as many packets as possible, moving the WRITE index forward as it
70 * resets the Rx queue buffers with new memory.
71 *
72 * The management in the driver is as follows:
73 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
74 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
75 * to replenish the iwl->rxq->rx_free.
76 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
77 * iwl->rxq is replenished and the READ INDEX is updated (updating the
78 * 'processed' and 'read' driver indexes as well)
79 * + A received packet is processed and handed to the kernel network stack,
80 * detached from the iwl->rxq. The driver 'processed' index is updated.
81 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
82 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
83 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
84 * were enough free buffers and RX_STALLED is set it is cleared.
85 *
86 *
87 * Driver sequence:
88 *
89 * iwl_legacy_rx_queue_alloc() Allocates rx_free
90 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
91 * iwl_rx_queue_restock
92 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
93 * queue, updates firmware pointers, and updates
94 * the WRITE index. If insufficient rx_free buffers
95 * are available, schedules iwl_rx_replenish
96 *
97 * -- enable interrupts --
98 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
99 * READ INDEX, detaching the SKB from the pool.
100 * Moves the packet buffer from queue to rx_used.
101 * Calls iwl_rx_queue_restock to refill any empty
102 * slots.
103 * ...
104 *
105 */
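/*
 * Worked example of the index convention above (illustrative, not from the
 * original patch): if READ = 10, then WRITE = 9 means the queue is empty and
 * WRITE = 10 means it is full; iwl_legacy_rx_queue_space() below also
 * reserves two slots (the "s -= 2") so the two states are never confused.
 */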
106
107/**
108 * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
109 */
110int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
111{
112 int s = q->read - q->write;
113 if (s <= 0)
114 s += RX_QUEUE_SIZE;
115 /* keep some buffer to not confuse full and empty queue */
116 s -= 2;
117 if (s < 0)
118 s = 0;
119 return s;
120}
121EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
122
123/**
124 * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
125 */
126void
127iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
128 struct iwl_rx_queue *q)
129{
130 unsigned long flags;
131 u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
132 u32 reg;
133
134 spin_lock_irqsave(&q->lock, flags);
135
136 if (q->need_update == 0)
137 goto exit_unlock;
138
139 /* If power-saving is in use, make sure device is awake */
140 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
141 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
142
143 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
144 IWL_DEBUG_INFO(priv,
145 "Rx queue requesting wakeup,"
146 " GP1 = 0x%x\n", reg);
147 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
148 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
149 goto exit_unlock;
150 }
151
152 q->write_actual = (q->write & ~0x7);
153 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
154 q->write_actual);
155
156 /* Else device is assumed to be awake */
157 } else {
158 /* Device expects a multiple of 8 */
159 q->write_actual = (q->write & ~0x7);
160 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
161 q->write_actual);
162 }
163
164 q->need_update = 0;
165
166 exit_unlock:
167 spin_unlock_irqrestore(&q->lock, flags);
168}
169EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
170
171int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
172{
173 struct iwl_rx_queue *rxq = &priv->rxq;
174 struct device *dev = &priv->pci_dev->dev;
175 int i;
176
177 spin_lock_init(&rxq->lock);
178 INIT_LIST_HEAD(&rxq->rx_free);
179 INIT_LIST_HEAD(&rxq->rx_used);
180
181 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
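	/* Each RBD is a single 32-bit DMA address, hence the
	 * 4 * RX_QUEUE_SIZE allocation below. */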
182 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
183 GFP_KERNEL);
184 if (!rxq->bd)
185 goto err_bd;
186
187 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
188 &rxq->rb_stts_dma, GFP_KERNEL);
189 if (!rxq->rb_stts)
190 goto err_rb;
191
192 /* Fill the rx_used queue with _all_ of the Rx buffers */
193 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
194 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
195
196 /* Set us so that we have processed and used all buffers, but have
197 * not restocked the Rx queue with fresh buffers */
198 rxq->read = rxq->write = 0;
199 rxq->write_actual = 0;
200 rxq->free_count = 0;
201 rxq->need_update = 0;
202 return 0;
203
204err_rb:
205 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
206 rxq->bd_dma);
207err_bd:
208 return -ENOMEM;
209}
210EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
211
212
213void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
214 struct iwl_rx_mem_buffer *rxb)
215{
216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
217 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
218
219 if (!report->state) {
220 IWL_DEBUG_11H(priv,
221 "Spectrum Measure Notification: Start\n");
222 return;
223 }
224
225 memcpy(&priv->measure_report, report, sizeof(*report));
226 priv->measurement_status |= MEASUREMENT_READY;
227}
228EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
229
230void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
231 struct iwl_rx_packet *pkt)
232{
233 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
234 return;
235 if (iwl_legacy_is_any_associated(priv)) {
236 if (priv->cfg->ops->lib->check_plcp_health) {
237 if (!priv->cfg->ops->lib->check_plcp_health(
238 priv, pkt)) {
239 /*
240 * high plcp error detected
241 * reset Radio
242 */
243 iwl_legacy_force_reset(priv,
244 IWL_RF_RESET, false);
245 }
246 }
247 }
248}
249EXPORT_SYMBOL(iwl_legacy_recover_from_statistics);
250
251/*
252 * returns non-zero if packet should be dropped
253 */
254int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
255 struct ieee80211_hdr *hdr,
256 u32 decrypt_res,
257 struct ieee80211_rx_status *stats)
258{
259 u16 fc = le16_to_cpu(hdr->frame_control);
260
261 /*
262 * All contexts have the same setting here due to it being
263 * a module parameter, so OK to check any context.
264 */
265 if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
266 RXON_FILTER_DIS_DECRYPT_MSK)
267 return 0;
268
269 if (!(fc & IEEE80211_FCTL_PROTECTED))
270 return 0;
271
272 IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
273 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
274 case RX_RES_STATUS_SEC_TYPE_TKIP:
 275		/* The uCode got a bad phase 1 key and pushes the packet up.
 276		 * Decryption will be done in SW. */
277 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
278 RX_RES_STATUS_BAD_KEY_TTAK)
279 break;
280
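		/* otherwise (TTAK OK) fall through to the ICV/MIC check below */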
281 case RX_RES_STATUS_SEC_TYPE_WEP:
282 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
283 RX_RES_STATUS_BAD_ICV_MIC) {
284 /* bad ICV, the packet is destroyed since the
285 * decryption is inplace, drop it */
286 IWL_DEBUG_RX(priv, "Packet destroyed\n");
287 return -1;
288 }
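		/* fall through: report hardware decryption if it succeeded */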
289 case RX_RES_STATUS_SEC_TYPE_CCMP:
290 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
291 RX_RES_STATUS_DECRYPT_OK) {
292 IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
293 stats->flag |= RX_FLAG_DECRYPTED;
294 }
295 break;
296
297 default:
298 break;
299 }
300 return 0;
301}
302EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
new file mode 100644
index 000000000000..842f0b46b6df
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-scan.c
@@ -0,0 +1,625 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39
40/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
41 * sending probe req. This should be set long enough to hear probe responses
42 * from more than one AP. */
43#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
44#define IWL_ACTIVE_DWELL_TIME_52 (20)
45
46#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
47#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
48
49/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
50 * Must be set longer than active dwell time.
51 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
52#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
53#define IWL_PASSIVE_DWELL_TIME_52 (10)
54#define IWL_PASSIVE_DWELL_BASE (100)
55#define IWL_CHANNEL_TUNE_TIME 5
56
57static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
58{
59 int ret;
60 struct iwl_rx_packet *pkt;
61 struct iwl_host_cmd cmd = {
62 .id = REPLY_SCAN_ABORT_CMD,
63 .flags = CMD_WANT_SKB,
64 };
65
66 /* Exit instantly with error when device is not ready
67 * to receive scan abort command or it does not perform
68 * hardware scan currently */
69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->status) ||
73 test_bit(STATUS_EXIT_PENDING, &priv->status))
74 return -EIO;
75
76 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
77 if (ret)
78 return ret;
79
80 pkt = (struct iwl_rx_packet *)cmd.reply_page;
81 if (pkt->u.status != CAN_ABORT_STATUS) {
82 /* The scan abort will return 1 for success or
83 * 2 for "failure". A failure condition can be
84 * due to simply not being in an active scan which
 85		 * can occur if we send the scan abort before
86 * the microcode has notified us that a scan is
87 * completed. */
88 IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
89 ret = -EIO;
90 }
91
92 iwl_legacy_free_pages(priv, cmd.reply_page);
93 return ret;
94}
95
96static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
97{
98 /* check if scan was requested from mac80211 */
99 if (priv->scan_request) {
100 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
101 ieee80211_scan_completed(priv->hw, aborted);
102 }
103
104 priv->is_internal_short_scan = false;
105 priv->scan_vif = NULL;
106 priv->scan_request = NULL;
107}
108
109void iwl_legacy_force_scan_end(struct iwl_priv *priv)
110{
111 lockdep_assert_held(&priv->mutex);
112
113 if (!test_bit(STATUS_SCANNING, &priv->status)) {
114 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
115 return;
116 }
117
118 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
119 clear_bit(STATUS_SCANNING, &priv->status);
120 clear_bit(STATUS_SCAN_HW, &priv->status);
121 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
122 iwl_legacy_complete_scan(priv, true);
123}
124
125static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
126{
127 int ret;
128
129 lockdep_assert_held(&priv->mutex);
130
131 if (!test_bit(STATUS_SCANNING, &priv->status)) {
132 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
133 return;
134 }
135
136 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
137 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
138 return;
139 }
140
141 ret = iwl_legacy_send_scan_abort(priv);
142 if (ret) {
143 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
144 iwl_legacy_force_scan_end(priv);
145 } else
 146		IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
147}
148
149/**
 150 * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
151 */
152int iwl_legacy_scan_cancel(struct iwl_priv *priv)
153{
154 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
155 queue_work(priv->workqueue, &priv->abort_scan);
156 return 0;
157}
158EXPORT_SYMBOL(iwl_legacy_scan_cancel);
159
160/**
161 * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
162 * @ms: amount of time to wait (in milliseconds) for scan to abort
163 *
164 */
165int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
166{
167 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
168
169 lockdep_assert_held(&priv->mutex);
170
171 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
172
173 iwl_legacy_do_scan_abort(priv);
174
175 while (time_before_eq(jiffies, timeout)) {
176 if (!test_bit(STATUS_SCAN_HW, &priv->status))
177 break;
178 msleep(20);
179 }
180
181 return test_bit(STATUS_SCAN_HW, &priv->status);
182}
183EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
184
185/* Service response to REPLY_SCAN_CMD (0x80) */
186static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
187 struct iwl_rx_mem_buffer *rxb)
188{
189#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
190 struct iwl_rx_packet *pkt = rxb_addr(rxb);
191 struct iwl_scanreq_notification *notif =
192 (struct iwl_scanreq_notification *)pkt->u.raw;
193
194 IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
195#endif
196}
197
198/* Service SCAN_START_NOTIFICATION (0x82) */
199static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
200 struct iwl_rx_mem_buffer *rxb)
201{
202 struct iwl_rx_packet *pkt = rxb_addr(rxb);
203 struct iwl_scanstart_notification *notif =
204 (struct iwl_scanstart_notification *)pkt->u.raw;
205 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
206 IWL_DEBUG_SCAN(priv, "Scan start: "
207 "%d [802.11%s] "
208 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
209 notif->channel,
210 notif->band ? "bg" : "a",
211 le32_to_cpu(notif->tsf_high),
212 le32_to_cpu(notif->tsf_low),
213 notif->status, notif->beacon_timer);
214}
215
216/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
217static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
218 struct iwl_rx_mem_buffer *rxb)
219{
220#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
221 struct iwl_rx_packet *pkt = rxb_addr(rxb);
222 struct iwl_scanresults_notification *notif =
223 (struct iwl_scanresults_notification *)pkt->u.raw;
224
225 IWL_DEBUG_SCAN(priv, "Scan ch.res: "
226 "%d [802.11%s] "
227 "(TSF: 0x%08X:%08X) - %d "
228 "elapsed=%lu usec\n",
229 notif->channel,
230 notif->band ? "bg" : "a",
231 le32_to_cpu(notif->tsf_high),
232 le32_to_cpu(notif->tsf_low),
233 le32_to_cpu(notif->statistics[0]),
234 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
235#endif
236}
237
238/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
239static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
240 struct iwl_rx_mem_buffer *rxb)
241{
242
243#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
244 struct iwl_rx_packet *pkt = rxb_addr(rxb);
245 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
246#endif
247
248 IWL_DEBUG_SCAN(priv,
249 "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
250 scan_notif->scanned_channels,
251 scan_notif->tsf_low,
252 scan_notif->tsf_high, scan_notif->status);
253
254 /* The HW is no longer scanning */
255 clear_bit(STATUS_SCAN_HW, &priv->status);
256
257 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
258 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
259 jiffies_to_msecs(jiffies - priv->scan_start));
260
261 queue_work(priv->workqueue, &priv->scan_completed);
262}
263
264void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
265{
266 /* scan handlers */
267 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
268 priv->rx_handlers[SCAN_START_NOTIFICATION] =
269 iwl_legacy_rx_scan_start_notif;
270 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
271 iwl_legacy_rx_scan_results_notif;
272 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
273 iwl_legacy_rx_scan_complete_notif;
274}
275EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
276
277inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
278 enum ieee80211_band band,
279 u8 n_probes)
280{
281 if (band == IEEE80211_BAND_5GHZ)
282 return IWL_ACTIVE_DWELL_TIME_52 +
283 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
284 else
285 return IWL_ACTIVE_DWELL_TIME_24 +
286 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
287}
288EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
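/*
 * Worked example (illustrative, not from the original patch): on 2.4 GHz
 * with n_probes = 2 this gives 30 + 3 * (2 + 1) = 39 ms of active dwell;
 * on 5 GHz it gives 20 + 2 * (2 + 1) = 26 ms.
 */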
289
290u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
291 enum ieee80211_band band,
292 struct ieee80211_vif *vif)
293{
294 struct iwl_rxon_context *ctx;
295 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
296 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
297 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
298
299 if (iwl_legacy_is_any_associated(priv)) {
300 /*
301 * If we're associated, we clamp the maximum passive
302 * dwell time to be 98% of the smallest beacon interval
303 * (minus 2 * channel tune time)
304 */
305 for_each_context(priv, ctx) {
306 u16 value;
307
308 if (!iwl_legacy_is_associated_ctx(ctx))
309 continue;
310 value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
311 if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
312 value = IWL_PASSIVE_DWELL_BASE;
313 value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
314 passive = min(value, passive);
315 }
316 }
317
318 return passive;
319}
320EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
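/*
 * Worked example (illustrative, not from the original patch): when associated
 * on one context with a beacon interval of 100, the clamp above yields
 * (100 * 98) / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 88, so the 2.4 GHz passive
 * dwell of 100 + 20 = 120 ms is reduced to 88 ms.
 */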
321
322void iwl_legacy_init_scan_params(struct iwl_priv *priv)
323{
324 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
325 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
326 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
327 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
328 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
329}
330EXPORT_SYMBOL(iwl_legacy_init_scan_params);
331
332static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
333 struct ieee80211_vif *vif,
334 bool internal,
335 enum ieee80211_band band)
336{
337 int ret;
338
339 lockdep_assert_held(&priv->mutex);
340
341 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
342 return -EOPNOTSUPP;
343
344 cancel_delayed_work(&priv->scan_check);
345
346 if (!iwl_legacy_is_ready_rf(priv)) {
347 IWL_WARN(priv, "Request scan called when driver not ready.\n");
348 return -EIO;
349 }
350
351 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
352 IWL_DEBUG_SCAN(priv,
 353			"Multiple concurrent scan requests.\n");
354 return -EBUSY;
355 }
356
357 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
358 IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
359 return -EBUSY;
360 }
361
362 IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
363 internal ? "internal short " : "");
364
365 set_bit(STATUS_SCANNING, &priv->status);
366 priv->is_internal_short_scan = internal;
367 priv->scan_start = jiffies;
368 priv->scan_band = band;
369
370 ret = priv->cfg->ops->utils->request_scan(priv, vif);
371 if (ret) {
372 clear_bit(STATUS_SCANNING, &priv->status);
373 priv->is_internal_short_scan = false;
374 return ret;
375 }
376
377 queue_delayed_work(priv->workqueue, &priv->scan_check,
378 IWL_SCAN_CHECK_WATCHDOG);
379
380 return 0;
381}
382
383int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
384 struct ieee80211_vif *vif,
385 struct cfg80211_scan_request *req)
386{
387 struct iwl_priv *priv = hw->priv;
388 int ret;
389
390 IWL_DEBUG_MAC80211(priv, "enter\n");
391
392 if (req->n_channels == 0)
393 return -EINVAL;
394
395 mutex_lock(&priv->mutex);
396
397 if (test_bit(STATUS_SCANNING, &priv->status) &&
398 !priv->is_internal_short_scan) {
399 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
400 ret = -EAGAIN;
401 goto out_unlock;
402 }
403
404 /* mac80211 will only ask for one band at a time */
405 priv->scan_request = req;
406 priv->scan_vif = vif;
407
408 /*
409 * If an internal scan is in progress, just set
410 * up the scan_request as per above.
411 */
412 if (priv->is_internal_short_scan) {
413 IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
414 ret = 0;
415 } else
416 ret = iwl_legacy_scan_initiate(priv, vif, false,
417 req->channels[0]->band);
418
419 IWL_DEBUG_MAC80211(priv, "leave\n");
420
421out_unlock:
422 mutex_unlock(&priv->mutex);
423
424 return ret;
425}
426EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
427
428/*
 429 * internal short scan; this function should only be called while associated.
 430 * It will reset and tune the radio to prevent possible RF-related problems.
431 */
432void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv)
433{
434 queue_work(priv->workqueue, &priv->start_internal_scan);
435}
436
437static void iwl_legacy_bg_start_internal_scan(struct work_struct *work)
438{
439 struct iwl_priv *priv =
440 container_of(work, struct iwl_priv, start_internal_scan);
441
442 IWL_DEBUG_SCAN(priv, "Start internal scan\n");
443
444 mutex_lock(&priv->mutex);
445
 446	if (priv->is_internal_short_scan) {
447 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
448 goto unlock;
449 }
450
451 if (test_bit(STATUS_SCANNING, &priv->status)) {
452 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
453 goto unlock;
454 }
455
456 if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band))
457 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
458 unlock:
459 mutex_unlock(&priv->mutex);
460}
461
462static void iwl_legacy_bg_scan_check(struct work_struct *data)
463{
464 struct iwl_priv *priv =
465 container_of(data, struct iwl_priv, scan_check.work);
466
467 IWL_DEBUG_SCAN(priv, "Scan check work\n");
468
 469	/* If we got here, the firmware has not finished the scan and is
 470	 * most likely in bad shape, so we don't bother to send an abort
 471	 * command; just force scan complete to mac80211 */
472 mutex_lock(&priv->mutex);
473 iwl_legacy_force_scan_end(priv);
474 mutex_unlock(&priv->mutex);
475}
476
477/**
478 * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
479 */
480
481u16
482iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
483 const u8 *ta, const u8 *ies, int ie_len, int left)
484{
485 int len = 0;
486 u8 *pos = NULL;
487
488 /* Make sure there is enough space for the probe request,
489 * two mandatory IEs and the data */
490 left -= 24;
491 if (left < 0)
492 return 0;
493
494 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
495 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
496 memcpy(frame->sa, ta, ETH_ALEN);
497 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
498 frame->seq_ctrl = 0;
499
500 len += 24;
501
502 /* ...next IE... */
503 pos = &frame->u.probe_req.variable[0];
504
505 /* fill in our indirect SSID IE */
506 left -= 2;
507 if (left < 0)
508 return 0;
509 *pos++ = WLAN_EID_SSID;
510 *pos++ = 0;
511
512 len += 2;
513
514 if (WARN_ON(left < ie_len))
515 return len;
516
517 if (ies && ie_len) {
518 memcpy(pos, ies, ie_len);
519 len += ie_len;
520 }
521
522 return (u16)len;
523}
524EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
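/*
 * A minimal usage sketch (not part of this patch): the scan-request code is
 * the real caller, but conceptually the probe request is built as a 24-byte
 * management header, a zero-length (broadcast) SSID IE, then any IEs supplied
 * by mac80211 in the cfg80211 scan request.
 */
static inline u16 iwl_sketch_build_probe(struct iwl_priv *priv,
					 struct ieee80211_mgmt *frame,
					 const u8 *ta,
					 struct cfg80211_scan_request *req,
					 int buf_len)
{
	return iwl_legacy_fill_probe_req(priv, frame, ta,
					 req->ie, req->ie_len, buf_len);
}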
525
526static void iwl_legacy_bg_abort_scan(struct work_struct *work)
527{
528 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
529
530 IWL_DEBUG_SCAN(priv, "Abort scan work\n");
531
 532	/* We keep the scan_check work queued in case the firmware does not
 533	 * report back the scan-completed notification */
534 mutex_lock(&priv->mutex);
535 iwl_legacy_scan_cancel_timeout(priv, 200);
536 mutex_unlock(&priv->mutex);
537}
538
539static void iwl_legacy_bg_scan_completed(struct work_struct *work)
540{
541 struct iwl_priv *priv =
542 container_of(work, struct iwl_priv, scan_completed);
543 bool aborted;
544
545 IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
546 priv->is_internal_short_scan ? "internal short " : "");
547
548 cancel_delayed_work(&priv->scan_check);
549
550 mutex_lock(&priv->mutex);
551
552 aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
553 if (aborted)
554 IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
555
556 if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
557 IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
558 goto out_settings;
559 }
560
561 if (priv->is_internal_short_scan && !aborted) {
562 int err;
563
564 /* Check if mac80211 requested scan during our internal scan */
565 if (priv->scan_request == NULL)
566 goto out_complete;
567
568 /* If so request a new scan */
569 err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false,
570 priv->scan_request->channels[0]->band);
571 if (err) {
572 IWL_DEBUG_SCAN(priv,
573 "failed to initiate pending scan: %d\n", err);
574 aborted = true;
575 goto out_complete;
576 }
577
578 goto out;
579 }
580
581out_complete:
582 iwl_legacy_complete_scan(priv, aborted);
583
584out_settings:
585 /* Can we still talk to firmware ? */
586 if (!iwl_legacy_is_ready_rf(priv))
587 goto out;
588
589 /*
590 * We do not commit power settings while scan is pending,
591 * do it now if the settings changed.
592 */
593 iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next,
594 false);
595 iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
596
597 priv->cfg->ops->utils->post_scan(priv);
598
599out:
600 mutex_unlock(&priv->mutex);
601}
602
603void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
604{
605 INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
606 INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
607 INIT_WORK(&priv->start_internal_scan,
608 iwl_legacy_bg_start_internal_scan);
609 INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
610}
611EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
612
613void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
614{
615 cancel_work_sync(&priv->start_internal_scan);
616 cancel_work_sync(&priv->abort_scan);
617 cancel_work_sync(&priv->scan_completed);
618
619 if (cancel_delayed_work_sync(&priv->scan_check)) {
620 mutex_lock(&priv->mutex);
621 iwl_legacy_force_scan_end(priv);
622 mutex_unlock(&priv->mutex);
623 }
624}
625EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
new file mode 100644
index 000000000000..9f70a4723103
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -0,0 +1,92 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_spectrum_h__
30#define __iwl_legacy_spectrum_h__
31enum { /* ieee80211_basic_report.map */
32 IEEE80211_BASIC_MAP_BSS = (1 << 0),
33 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
34 IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
35 IEEE80211_BASIC_MAP_RADAR = (1 << 3),
36 IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
37 /* Bits 5-7 are reserved */
38
39};
40struct ieee80211_basic_report {
41 u8 channel;
42 __le64 start_time;
43 __le16 duration;
44 u8 map;
45} __packed;
46
47enum { /* ieee80211_measurement_request.mode */
48 /* Bit 0 is reserved */
49 IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
50 IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
51 IEEE80211_MEASUREMENT_REPORT = (1 << 3),
52 /* Bits 4-7 are reserved */
53};
54
55enum {
56 IEEE80211_REPORT_BASIC = 0, /* required */
57 IEEE80211_REPORT_CCA = 1, /* optional */
58 IEEE80211_REPORT_RPI = 2, /* optional */
59 /* 3-255 reserved */
60};
61
62struct ieee80211_measurement_params {
63 u8 channel;
64 __le64 start_time;
65 __le16 duration;
66} __packed;
67
68struct ieee80211_info_element {
69 u8 id;
70 u8 len;
71 u8 data[0];
72} __packed;
73
74struct ieee80211_measurement_request {
75 struct ieee80211_info_element ie;
76 u8 token;
77 u8 mode;
78 u8 type;
79 struct ieee80211_measurement_params params[0];
80} __packed;
81
82struct ieee80211_measurement_report {
83 struct ieee80211_info_element ie;
84 u8 token;
85 u8 mode;
86 u8 type;
87 union {
88 struct ieee80211_basic_report basic[0];
89 } u;
90} __packed;
91
92#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
new file mode 100644
index 000000000000..47c9da3834ea
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -0,0 +1,816 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/lockdep.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38
39/* priv->sta_lock must be held */
40static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
41{
42
43 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
44 IWL_ERR(priv,
45 "ACTIVATE a non DRIVER active station id %u addr %pM\n",
46 sta_id, priv->stations[sta_id].sta.sta.addr);
47
48 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
49 IWL_DEBUG_ASSOC(priv,
50 "STA id %u addr %pM already present"
51 " in uCode (according to driver)\n",
52 sta_id, priv->stations[sta_id].sta.sta.addr);
53 } else {
54 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
55 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
56 sta_id, priv->stations[sta_id].sta.sta.addr);
57 }
58}
59
60static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
61 struct iwl_legacy_addsta_cmd *addsta,
62 struct iwl_rx_packet *pkt,
63 bool sync)
64{
65 u8 sta_id = addsta->sta.sta_id;
66 unsigned long flags;
67 int ret = -EIO;
68
69 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
70 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
71 pkt->hdr.flags);
72 return ret;
73 }
74
75 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
76 sta_id);
77
78 spin_lock_irqsave(&priv->sta_lock, flags);
79
80 switch (pkt->u.add_sta.status) {
81 case ADD_STA_SUCCESS_MSK:
82 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
83 iwl_legacy_sta_ucode_activate(priv, sta_id);
84 ret = 0;
85 break;
86 case ADD_STA_NO_ROOM_IN_TABLE:
87 IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
88 sta_id);
89 break;
90 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
91 IWL_ERR(priv,
92 "Adding station %d failed, no block ack resource.\n",
93 sta_id);
94 break;
95 case ADD_STA_MODIFY_NON_EXIST_STA:
96 IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
97 sta_id);
98 break;
99 default:
100 IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
101 pkt->u.add_sta.status);
102 break;
103 }
104
105 IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
106 priv->stations[sta_id].sta.mode ==
107 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
108 sta_id, priv->stations[sta_id].sta.sta.addr);
109
110 /*
111 * XXX: The MAC address in the command buffer is often changed from
112 * the original sent to the device. That is, the MAC address
 113	 * written to the command buffer often is not the same MAC address
114 * read from the command buffer when the command returns. This
115 * issue has not yet been resolved and this debugging is left to
116 * observe the problem.
117 */
118 IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
119 priv->stations[sta_id].sta.mode ==
120 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
121 addsta->sta.addr);
122 spin_unlock_irqrestore(&priv->sta_lock, flags);
123
124 return ret;
125}
126
127static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
128 struct iwl_device_cmd *cmd,
129 struct iwl_rx_packet *pkt)
130{
131 struct iwl_legacy_addsta_cmd *addsta =
132 (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
133
134 iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
135
136}
137
138int iwl_legacy_send_add_sta(struct iwl_priv *priv,
139 struct iwl_legacy_addsta_cmd *sta, u8 flags)
140{
141 struct iwl_rx_packet *pkt = NULL;
142 int ret = 0;
143 u8 data[sizeof(*sta)];
144 struct iwl_host_cmd cmd = {
145 .id = REPLY_ADD_STA,
146 .flags = flags,
147 .data = data,
148 };
149 u8 sta_id __maybe_unused = sta->sta.sta_id;
150
151 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
152 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
153
154 if (flags & CMD_ASYNC)
155 cmd.callback = iwl_legacy_add_sta_callback;
156 else {
157 cmd.flags |= CMD_WANT_SKB;
158 might_sleep();
159 }
160
161 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
162 ret = iwl_legacy_send_cmd(priv, &cmd);
163
164 if (ret || (flags & CMD_ASYNC))
165 return ret;
166
167 if (ret == 0) {
168 pkt = (struct iwl_rx_packet *)cmd.reply_page;
169 ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
170 }
171 iwl_legacy_free_pages(priv, cmd.reply_page);
172
173 return ret;
174}
175EXPORT_SYMBOL(iwl_legacy_send_add_sta);
176
177static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
178 struct ieee80211_sta *sta,
179 struct iwl_rxon_context *ctx)
180{
181 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
182 __le32 sta_flags;
183 u8 mimo_ps_mode;
184
185 if (!sta || !sta_ht_inf->ht_supported)
186 goto done;
187
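	/* IEEE80211_HT_CAP_SM_PS occupies bits 2-3 of the HT capability info,
	 * hence the shift by 2 below. */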
188 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
189 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
190 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
191 "static" :
192 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
193 "dynamic" : "disabled");
194
195 sta_flags = priv->stations[index].sta.station_flags;
196
197 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
198
199 switch (mimo_ps_mode) {
200 case WLAN_HT_CAP_SM_PS_STATIC:
201 sta_flags |= STA_FLG_MIMO_DIS_MSK;
202 break;
203 case WLAN_HT_CAP_SM_PS_DYNAMIC:
204 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
205 break;
206 case WLAN_HT_CAP_SM_PS_DISABLED:
207 break;
208 default:
209 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
210 break;
211 }
212
213 sta_flags |= cpu_to_le32(
214 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
215
216 sta_flags |= cpu_to_le32(
217 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
218
219 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
220 sta_flags |= STA_FLG_HT40_EN_MSK;
221 else
222 sta_flags &= ~STA_FLG_HT40_EN_MSK;
223
224 priv->stations[index].sta.station_flags = sta_flags;
225 done:
226 return;
227}
228
229/**
230 * iwl_legacy_prep_station - Prepare station information for addition
231 *
232 * should be called with sta_lock held
233 */
234u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
235 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
236{
237 struct iwl_station_entry *station;
238 int i;
239 u8 sta_id = IWL_INVALID_STATION;
240 u16 rate;
241
242 if (is_ap)
243 sta_id = ctx->ap_sta_id;
244 else if (is_broadcast_ether_addr(addr))
245 sta_id = ctx->bcast_sta_id;
246 else
247 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
248 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
249 addr)) {
250 sta_id = i;
251 break;
252 }
253
254 if (!priv->stations[i].used &&
255 sta_id == IWL_INVALID_STATION)
256 sta_id = i;
257 }
258
259 /*
260 * These two conditions have the same outcome, but keep them
261 * separate
262 */
263 if (unlikely(sta_id == IWL_INVALID_STATION))
264 return sta_id;
265
266 /*
267 * uCode is not able to deal with multiple requests to add a
268 * station. Keep track if one is in progress so that we do not send
269 * another.
270 */
271 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
272 IWL_DEBUG_INFO(priv,
273 "STA %d already in process of being added.\n",
274 sta_id);
275 return sta_id;
276 }
277
278 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
279 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
280 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
281 IWL_DEBUG_ASSOC(priv,
282 "STA %d (%pM) already added, not adding again.\n",
283 sta_id, addr);
284 return sta_id;
285 }
286
287 station = &priv->stations[sta_id];
288 station->used = IWL_STA_DRIVER_ACTIVE;
289 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
290 sta_id, addr);
291 priv->num_stations++;
292
293 /* Set up the REPLY_ADD_STA command to send to device */
294 memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
295 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
296 station->sta.mode = 0;
297 station->sta.sta.sta_id = sta_id;
298 station->sta.station_flags = ctx->station_flags;
299 station->ctxid = ctx->ctxid;
300
301 if (sta) {
302 struct iwl_station_priv_common *sta_priv;
303
304 sta_priv = (void *)sta->drv_priv;
305 sta_priv->ctx = ctx;
306 }
307
308 /*
309 * OK to call unconditionally, since local stations (IBSS BSSID
310 * STA and broadcast STA) pass in a NULL sta, and mac80211
311 * doesn't allow HT IBSS.
312 */
313 iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
314
315 /* 3945 only */
316 rate = (priv->band == IEEE80211_BAND_5GHZ) ?
317 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
318 /* Turn on both antennas for the station... */
319 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
320
321 return sta_id;
322
323}
324EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
325
326#define STA_WAIT_TIMEOUT (HZ/2)
327
328/**
329 * iwl_legacy_add_station_common -
330 */
331int
332iwl_legacy_add_station_common(struct iwl_priv *priv,
333 struct iwl_rxon_context *ctx,
334 const u8 *addr, bool is_ap,
335 struct ieee80211_sta *sta, u8 *sta_id_r)
336{
337 unsigned long flags_spin;
338 int ret = 0;
339 u8 sta_id;
340 struct iwl_legacy_addsta_cmd sta_cmd;
341
342 *sta_id_r = 0;
343 spin_lock_irqsave(&priv->sta_lock, flags_spin);
344 sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
345 if (sta_id == IWL_INVALID_STATION) {
346 IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
347 addr);
348 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
349 return -EINVAL;
350 }
351
352 /*
353 * uCode is not able to deal with multiple requests to add a
354 * station. Keep track if one is in progress so that we do not send
355 * another.
356 */
357 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
358 IWL_DEBUG_INFO(priv,
359 "STA %d already in process of being added.\n",
360 sta_id);
361 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
362 return -EEXIST;
363 }
364
365 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
366 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
367 IWL_DEBUG_ASSOC(priv,
368 "STA %d (%pM) already added, not adding again.\n",
369 sta_id, addr);
370 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
371 return -EEXIST;
372 }
373
374 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
375 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
376 sizeof(struct iwl_legacy_addsta_cmd));
377 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
378
379 /* Add station to device's station table */
380 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
381 if (ret) {
382 spin_lock_irqsave(&priv->sta_lock, flags_spin);
383 IWL_ERR(priv, "Adding station %pM failed.\n",
384 priv->stations[sta_id].sta.sta.addr);
385 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
386 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
387 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
388 }
389 *sta_id_r = sta_id;
390 return ret;
391}
392EXPORT_SYMBOL(iwl_legacy_add_station_common);
393
394/**
395 * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
396 *
397 * priv->sta_lock must be held
398 */
399static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
400{
 401	/* uCode must be active and driver must be inactive */
402 if ((priv->stations[sta_id].used &
403 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
404 IWL_STA_UCODE_ACTIVE)
405 IWL_ERR(priv, "removed non active STA %u\n", sta_id);
406
407 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
408
409 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
410 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
411}
412
413static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
414 const u8 *addr, int sta_id,
415 bool temporary)
416{
417 struct iwl_rx_packet *pkt;
418 int ret;
419
420 unsigned long flags_spin;
421 struct iwl_rem_sta_cmd rm_sta_cmd;
422
423 struct iwl_host_cmd cmd = {
424 .id = REPLY_REMOVE_STA,
425 .len = sizeof(struct iwl_rem_sta_cmd),
426 .flags = CMD_SYNC,
427 .data = &rm_sta_cmd,
428 };
429
430 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
431 rm_sta_cmd.num_sta = 1;
432 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
433
434 cmd.flags |= CMD_WANT_SKB;
435
436 ret = iwl_legacy_send_cmd(priv, &cmd);
437
438 if (ret)
439 return ret;
440
441 pkt = (struct iwl_rx_packet *)cmd.reply_page;
442 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
443 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
444 pkt->hdr.flags);
445 ret = -EIO;
446 }
447
448 if (!ret) {
449 switch (pkt->u.rem_sta.status) {
450 case REM_STA_SUCCESS_MSK:
451 if (!temporary) {
452 spin_lock_irqsave(&priv->sta_lock, flags_spin);
453 iwl_legacy_sta_ucode_deactivate(priv, sta_id);
454 spin_unlock_irqrestore(&priv->sta_lock,
455 flags_spin);
456 }
457 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
458 break;
459 default:
460 ret = -EIO;
461 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
462 break;
463 }
464 }
465 iwl_legacy_free_pages(priv, cmd.reply_page);
466
467 return ret;
468}
469
470/**
471 * iwl_legacy_remove_station - Remove driver's knowledge of station.
472 */
473int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
474 const u8 *addr)
475{
476 unsigned long flags;
477
478 if (!iwl_legacy_is_ready(priv)) {
479 IWL_DEBUG_INFO(priv,
480 "Unable to remove station %pM, device not ready.\n",
481 addr);
482 /*
483 * It is typical for stations to be removed when we are
484 * going down. Return success since device will be down
485 * soon anyway
486 */
487 return 0;
488 }
489
490 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
491 sta_id, addr);
492
493 if (WARN_ON(sta_id == IWL_INVALID_STATION))
494 return -EINVAL;
495
496 spin_lock_irqsave(&priv->sta_lock, flags);
497
498 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
499 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
500 addr);
501 goto out_err;
502 }
503
504 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
505 IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
506 addr);
507 goto out_err;
508 }
509
510 if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
511 kfree(priv->stations[sta_id].lq);
512 priv->stations[sta_id].lq = NULL;
513 }
514
515 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
516
517 priv->num_stations--;
518
519 BUG_ON(priv->num_stations < 0);
520
521 spin_unlock_irqrestore(&priv->sta_lock, flags);
522
523 return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
524out_err:
525 spin_unlock_irqrestore(&priv->sta_lock, flags);
526 return -EINVAL;
527}
528EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
529
530/**
531 * iwl_legacy_clear_ucode_stations - clear ucode station table bits
532 *
533 * This function clears all the bits in the driver indicating
534 * which stations are active in the ucode. Call when something
535 * other than explicit station management would cause this in
536 * the ucode, e.g. unassociated RXON.
537 */
538void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
539 struct iwl_rxon_context *ctx)
540{
541 int i;
542 unsigned long flags_spin;
543 bool cleared = false;
544
545 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
546
547 spin_lock_irqsave(&priv->sta_lock, flags_spin);
548 for (i = 0; i < priv->hw_params.max_stations; i++) {
549 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
550 continue;
551
552 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
553 IWL_DEBUG_INFO(priv,
554 "Clearing ucode active for station %d\n", i);
555 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
556 cleared = true;
557 }
558 }
559 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
560
561 if (!cleared)
562 IWL_DEBUG_INFO(priv,
563 "No active stations found to be cleared\n");
564}
565EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
566
567/**
568 * iwl_legacy_restore_stations() - Restore driver known stations to device
569 *
 570 * All stations considered active by the driver, but not present in ucode, are
571 * restored.
572 *
573 * Function sleeps.
574 */
575void
576iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
577{
578 struct iwl_legacy_addsta_cmd sta_cmd;
579 struct iwl_link_quality_cmd lq;
580 unsigned long flags_spin;
581 int i;
582 bool found = false;
583 int ret;
584 bool send_lq;
585
586 if (!iwl_legacy_is_ready(priv)) {
587 IWL_DEBUG_INFO(priv,
588 "Not ready yet, not restoring any stations.\n");
589 return;
590 }
591
592 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
593 spin_lock_irqsave(&priv->sta_lock, flags_spin);
594 for (i = 0; i < priv->hw_params.max_stations; i++) {
595 if (ctx->ctxid != priv->stations[i].ctxid)
596 continue;
597 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
598 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
599 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
600 priv->stations[i].sta.sta.addr);
601 priv->stations[i].sta.mode = 0;
602 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
603 found = true;
604 }
605 }
606
607 for (i = 0; i < priv->hw_params.max_stations; i++) {
608 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
609 memcpy(&sta_cmd, &priv->stations[i].sta,
610 sizeof(struct iwl_legacy_addsta_cmd));
611 send_lq = false;
612 if (priv->stations[i].lq) {
613 memcpy(&lq, priv->stations[i].lq,
614 sizeof(struct iwl_link_quality_cmd));
615 send_lq = true;
616 }
617 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
618 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
619 if (ret) {
620 spin_lock_irqsave(&priv->sta_lock, flags_spin);
621 IWL_ERR(priv, "Adding station %pM failed.\n",
622 priv->stations[i].sta.sta.addr);
623 priv->stations[i].used &=
624 ~IWL_STA_DRIVER_ACTIVE;
625 priv->stations[i].used &=
626 ~IWL_STA_UCODE_INPROGRESS;
627 spin_unlock_irqrestore(&priv->sta_lock,
628 flags_spin);
629 }
630 /*
631 * Rate scaling has already been initialized, send
632 * current LQ command
633 */
634 if (send_lq)
635 iwl_legacy_send_lq_cmd(priv, ctx, &lq,
636 CMD_SYNC, true);
637 spin_lock_irqsave(&priv->sta_lock, flags_spin);
638 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
639 }
640 }
641
642 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
643 if (!found)
644 IWL_DEBUG_INFO(priv, "Restoring all known stations"
645 " .... no stations to be restored.\n");
646 else
647 IWL_DEBUG_INFO(priv, "Restoring all known stations"
648 " .... complete.\n");
649}
650EXPORT_SYMBOL(iwl_legacy_restore_stations);
651
652int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
653{
654 int i;
655
656 for (i = 0; i < priv->sta_key_max_num; i++)
657 if (!test_and_set_bit(i, &priv->ucode_key_table))
658 return i;
659
660 return WEP_INVALID_OFFSET;
661}
662EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
663
664void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
665{
666 unsigned long flags;
667 int i;
668
669 spin_lock_irqsave(&priv->sta_lock, flags);
670 for (i = 0; i < priv->hw_params.max_stations; i++) {
671 if (!(priv->stations[i].used & IWL_STA_BCAST))
672 continue;
673
674 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
675 priv->num_stations--;
676 BUG_ON(priv->num_stations < 0);
677 kfree(priv->stations[i].lq);
678 priv->stations[i].lq = NULL;
679 }
680 spin_unlock_irqrestore(&priv->sta_lock, flags);
681}
682EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
683
684#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
685static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
686 struct iwl_link_quality_cmd *lq)
687{
688 int i;
689 IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
690 IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
691 lq->general_params.single_stream_ant_msk,
692 lq->general_params.dual_stream_ant_msk);
693
694 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
695 IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
696 i, lq->rs_table[i].rate_n_flags);
697}
698#else
699static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
700 struct iwl_link_quality_cmd *lq)
701{
702}
703#endif
704
705/**
706 * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
707 *
 708 * It sometimes happens that an HT rate has been in use and we
 709 * lose connectivity with the AP; mac80211 will then first tell us that the
710 * current channel is not HT anymore before removing the station. In such a
711 * scenario the RXON flags will be updated to indicate we are not
712 * communicating HT anymore, but the LQ command may still contain HT rates.
713 * Test for this to prevent driver from sending LQ command between the time
714 * RXON flags are updated and when LQ command is updated.
715 */
716static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
717 struct iwl_rxon_context *ctx,
718 struct iwl_link_quality_cmd *lq)
719{
720 int i;
721
722 if (ctx->ht.enabled)
723 return true;
724
725 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
726 ctx->active.channel);
727 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
728 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
729 RATE_MCS_HT_MSK) {
730 IWL_DEBUG_INFO(priv,
731 "index %d of LQ expects HT channel\n",
732 i);
733 return false;
734 }
735 }
736 return true;
737}
738
739/**
740 * iwl_legacy_send_lq_cmd() - Send link quality command
741 * @init: This command is sent as part of station initialization right
742 * after station has been added.
743 *
744 * The link quality command is sent as the last step of station creation.
745 * This is the special case in which init is set and we call a callback in
746 * this case to clear the state indicating that station creation is in
747 * progress.
748 */
749int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
750 struct iwl_link_quality_cmd *lq, u8 flags, bool init)
751{
752 int ret = 0;
753 unsigned long flags_spin;
754
755 struct iwl_host_cmd cmd = {
756 .id = REPLY_TX_LINK_QUALITY_CMD,
757 .len = sizeof(struct iwl_link_quality_cmd),
758 .flags = flags,
759 .data = lq,
760 };
761
762 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
763 return -EINVAL;
764
765
766 spin_lock_irqsave(&priv->sta_lock, flags_spin);
767 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
768 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
769 return -EINVAL;
770 }
771 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
772
773 iwl_legacy_dump_lq_cmd(priv, lq);
774 BUG_ON(init && (cmd.flags & CMD_ASYNC));
775
776 if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
777 ret = iwl_legacy_send_cmd(priv, &cmd);
778 else
779 ret = -EINVAL;
780
781 if (cmd.flags & CMD_ASYNC)
782 return ret;
783
784 if (init) {
785 IWL_DEBUG_INFO(priv, "init LQ command complete,"
786 " clearing sta addition status for sta %d\n",
787 lq->sta_id);
788 spin_lock_irqsave(&priv->sta_lock, flags_spin);
789 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
790 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
791 }
792 return ret;
793}
794EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
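For illustration only, a minimal hypothetical caller of iwl_legacy_send_lq_cmd(); the helper name is made up and not part of this patch, and it assumes the caller has already built the link quality command for the station:

/* Hypothetical sketch: send the initial LQ command synchronously right
 * after the station has been added.  With init == true and CMD_SYNC,
 * iwl_legacy_send_lq_cmd() also clears IWL_STA_UCODE_INPROGRESS for
 * lq->sta_id once the command completes. */
static int example_send_initial_lq(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx,
				   struct iwl_link_quality_cmd *lq)
{
	return iwl_legacy_send_lq_cmd(priv, ctx, lq, CMD_SYNC, true);
}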
795
796int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
797 struct ieee80211_vif *vif,
798 struct ieee80211_sta *sta)
799{
800 struct iwl_priv *priv = hw->priv;
801 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
802 int ret;
803
804 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
805 sta->addr);
806 mutex_lock(&priv->mutex);
807 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
808 sta->addr);
809 ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
810 if (ret)
811 IWL_ERR(priv, "Error removing station %pM\n",
812 sta->addr);
813 mutex_unlock(&priv->mutex);
814 return ret;
815}
816EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
new file mode 100644
index 000000000000..67bd75fe01a1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.h
@@ -0,0 +1,148 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_legacy_sta_h__
30#define __iwl_legacy_sta_h__
31
32#include "iwl-dev.h"
33
34#define HW_KEY_DYNAMIC 0
35#define HW_KEY_DEFAULT 1
36
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
40 being activated */
41#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
42 (this is for the IBSS BSSID stations) */
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44
45
46void iwl_legacy_restore_stations(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx);
48void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
49 struct iwl_rxon_context *ctx);
50void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
51int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
52int iwl_legacy_send_add_sta(struct iwl_priv *priv,
53 struct iwl_legacy_addsta_cmd *sta, u8 flags);
54int iwl_legacy_add_station_common(struct iwl_priv *priv,
55 struct iwl_rxon_context *ctx,
56 const u8 *addr, bool is_ap,
57 struct ieee80211_sta *sta, u8 *sta_id_r);
58int iwl_legacy_remove_station(struct iwl_priv *priv,
59 const u8 sta_id,
60 const u8 *addr);
61int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
62 struct ieee80211_vif *vif,
63 struct ieee80211_sta *sta);
64
65u8 iwl_legacy_prep_station(struct iwl_priv *priv,
66 struct iwl_rxon_context *ctx,
67 const u8 *addr, bool is_ap,
68 struct ieee80211_sta *sta);
69
70int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
71 struct iwl_rxon_context *ctx,
72 struct iwl_link_quality_cmd *lq,
73 u8 flags, bool init);
74
75/**
76 * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
77 * @priv: iwl priv struct
78 *
79 * This is called during iwl_down() to make sure that, in case
80 * we're coming here from a hardware restart, mac80211 will be
81 * able to reconfigure stations; if we're getting here via the
82 * normal down flow, the stations will already be cleared.
83 */
84static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
85{
86 unsigned long flags;
87 struct iwl_rxon_context *ctx;
88
89 spin_lock_irqsave(&priv->sta_lock, flags);
90 memset(priv->stations, 0, sizeof(priv->stations));
91 priv->num_stations = 0;
92
93 priv->ucode_key_table = 0;
94
95 for_each_context(priv, ctx) {
96 /*
97 * Remove all key information that is not stored as part
98 * of station information since mac80211 may not have had
99 * a chance to remove all the keys. When device is
100 * reconfigured by mac80211 after an error all keys will
101 * be reconfigured.
102 */
103 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
104 ctx->key_mapping_keys = 0;
105 }
106
107 spin_unlock_irqrestore(&priv->sta_lock, flags);
108}
109
110static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
111{
112 if (WARN_ON(!sta))
113 return IWL_INVALID_STATION;
114
115 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
116}
117
118/**
119 * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
120 * @priv: iwl priv
121 * @context: the current context
122 * @sta: mac80211 station
123 *
124 * In certain circumstances mac80211 passes a station pointer
125 * that may be %NULL, for example during TX or key setup. In
126 * that case, we need to use the broadcast station, so this
127 * inline wraps that pattern.
128 */
129static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
130 struct iwl_rxon_context *context,
131 struct ieee80211_sta *sta)
132{
133 int sta_id;
134
135 if (!sta)
136 return context->bcast_sta_id;
137
138 sta_id = iwl_legacy_sta_id(sta);
139
140 /*
141 * mac80211 should not be passing a partially
142 * initialised station!
143 */
144 WARN_ON(sta_id == IWL_INVALID_STATION);
145
146 return sta_id;
147}
148#endif /* __iwl_legacy_sta_h__ */
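As an illustration of how the inline above is meant to be used, a hypothetical helper (not part of this header; the real call sites are in the 3945/4965 TX paths later in this patch):

/* Hypothetical sketch: resolve the station index for an outgoing frame,
 * falling back to the context's broadcast station when mac80211 passes
 * a NULL station pointer. */
static int example_resolve_tx_sta_id(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     struct ieee80211_sta *sta)
{
	int sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);

	if (sta_id == IWL_INVALID_STATION)
		return -ENOENT;		/* caller should drop the frame */

	return sta_id;
}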
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
new file mode 100644
index 000000000000..7db8340d1c07
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -0,0 +1,637 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <net/mac80211.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40
41/**
42 * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
43 */
44void
45iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
46{
47 u32 reg = 0;
48 int txq_id = txq->q.id;
49
50 if (txq->need_update == 0)
51 return;
52
53 /* if we're trying to save power */
54 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
55 /* wake up nic if it's powered down ...
56 * uCode will wake up, and interrupt us again, so next
57 * time we'll skip this part. */
58 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
59
60 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
61 IWL_DEBUG_INFO(priv,
62 "Tx queue %d requesting wakeup,"
63 " GP1 = 0x%x\n", txq_id, reg);
64 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
65 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
66 return;
67 }
68
69 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
70 txq->q.write_ptr | (txq_id << 8));
71
72 /*
73 * else not in power-save mode,
74 * uCode will never sleep when we're
75 * trying to tx (during RFKILL, we're not trying to tx).
76 */
77 } else
78 iwl_write32(priv, HBUS_TARG_WRPTR,
79 txq->q.write_ptr | (txq_id << 8));
80 txq->need_update = 0;
81}
82EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
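A short note on the power-save path above (illustration only, not part of the patch):

/*
 * When STATUS_POWER_PMI is set and the MAC is asleep
 * (CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP), the function only requests MAC
 * access and returns without clearing txq->need_update.  The next call,
 * after the wake-up interrupt, therefore retries and actually pushes
 * the write pointer to HBUS_TARG_WRPTR.
 */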
83
84/**
85 * iwl_legacy_tx_queue_free - Deallocate DMA queue.
86 * @txq: Transmit queue to deallocate.
87 *
88 * Empty queue by removing and destroying all BD's.
89 * Free all buffers.
90 * 0-fill, but do not free "txq" descriptor structure.
91 */
92void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
93{
94 struct iwl_tx_queue *txq = &priv->txq[txq_id];
95 struct iwl_queue *q = &txq->q;
96 struct device *dev = &priv->pci_dev->dev;
97 int i;
98
99 if (q->n_bd == 0)
100 return;
101
102 /* first, empty all BD's */
103 for (; q->write_ptr != q->read_ptr;
104 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd))
105 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
106
107 /* De-alloc array of command/tx buffers */
108 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
109 kfree(txq->cmd[i]);
110
111 /* De-alloc circular buffer of TFDs */
112 if (txq->q.n_bd)
113 dma_free_coherent(dev, priv->hw_params.tfd_size *
114 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
115
116 /* De-alloc array of per-TFD driver data */
117 kfree(txq->txb);
118 txq->txb = NULL;
119
120 /* deallocate arrays */
121 kfree(txq->cmd);
122 kfree(txq->meta);
123 txq->cmd = NULL;
124 txq->meta = NULL;
125
126 /* 0-fill queue descriptor structure */
127 memset(txq, 0, sizeof(*txq));
128}
129EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
130
131/**
132 * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
133 * @txq: Transmit queue to deallocate.
134 *
135 * Empty queue by removing and destroying all BD's.
136 * Free all buffers.
137 * 0-fill, but do not free "txq" descriptor structure.
138 */
139void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
140{
141 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
142 struct iwl_queue *q = &txq->q;
143 struct device *dev = &priv->pci_dev->dev;
144 int i;
145 bool huge = false;
146
147 if (q->n_bd == 0)
148 return;
149
150 for (; q->read_ptr != q->write_ptr;
151 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
152 /* we have no way to tell if it is a huge cmd ATM */
153 i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
154
155 if (txq->meta[i].flags & CMD_SIZE_HUGE) {
156 huge = true;
157 continue;
158 }
159
160 pci_unmap_single(priv->pci_dev,
161 dma_unmap_addr(&txq->meta[i], mapping),
162 dma_unmap_len(&txq->meta[i], len),
163 PCI_DMA_BIDIRECTIONAL);
164 }
165 if (huge) {
166 i = q->n_window;
167 pci_unmap_single(priv->pci_dev,
168 dma_unmap_addr(&txq->meta[i], mapping),
169 dma_unmap_len(&txq->meta[i], len),
170 PCI_DMA_BIDIRECTIONAL);
171 }
172
173 /* De-alloc array of command/tx buffers */
174 for (i = 0; i <= TFD_CMD_SLOTS; i++)
175 kfree(txq->cmd[i]);
176
177 /* De-alloc circular buffer of TFDs */
178 if (txq->q.n_bd)
179 dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
180 txq->tfds, txq->q.dma_addr);
181
182 /* deallocate arrays */
183 kfree(txq->cmd);
184 kfree(txq->meta);
185 txq->cmd = NULL;
186 txq->meta = NULL;
187
188 /* 0-fill queue descriptor structure */
189 memset(txq, 0, sizeof(*txq));
190}
191EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
192
193/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
194 * DMA services
195 *
196 * Theory of operation
197 *
198 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
199 * of buffer descriptors, each of which points to one or more data buffers for
200 * the device to read from or fill. Driver and device exchange status of each
201 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
202 * entries in each circular buffer, to protect against confusing empty and full
203 * queue states.
204 *
205 * The device reads or writes the data in the queues via the device's several
206 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
207 *
208 * For Tx queues, there are low mark and high mark limits. If, after queuing
209 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
210 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
211 * > high mark, the Tx queue is resumed.
212 *
213 * See more detailed info in iwl-4965-hw.h.
214 ***************************************************/
215
216int iwl_legacy_queue_space(const struct iwl_queue *q)
217{
218 int s = q->read_ptr - q->write_ptr;
219
220 if (q->read_ptr > q->write_ptr)
221 s -= q->n_bd;
222
223 if (s <= 0)
224 s += q->n_window;
225 /* keep some reserve to not confuse empty and full situations */
226 s -= 2;
227 if (s < 0)
228 s = 0;
229 return s;
230}
231EXPORT_SYMBOL(iwl_legacy_queue_space);
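A worked trace of the space computation above, with arbitrarily chosen example values (illustration only, not part of the patch):

/*
 * Example: n_bd = 256, n_window = 64, read_ptr = 60, write_ptr = 62.
 *
 *   s = read_ptr - write_ptr      = -2
 *   read_ptr > write_ptr?           no, so no n_bd correction
 *   s <= 0, so s += n_window      -> 62
 *   s -= 2 (reserved entries)     -> 60 usable slots
 *
 * The two reserved entries keep an almost-full queue distinguishable
 * from an empty one, as described in the theory-of-operation comment.
 */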
232
233
234/**
235 * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
236 */
237static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
238 int count, int slots_num, u32 id)
239{
240 q->n_bd = count;
241 q->n_window = slots_num;
242 q->id = id;
243
244 /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
245 * and iwl_legacy_queue_dec_wrap are broken. */
246 BUG_ON(!is_power_of_2(count));
247
248 /* slots_num must be power-of-two size, otherwise
249 * iwl_legacy_get_cmd_index is broken. */
250 BUG_ON(!is_power_of_2(slots_num));
251
252 q->low_mark = q->n_window / 4;
253 if (q->low_mark < 4)
254 q->low_mark = 4;
255
256 q->high_mark = q->n_window / 8;
257 if (q->high_mark < 2)
258 q->high_mark = 2;
259
260 q->write_ptr = q->read_ptr = 0;
261
262 return 0;
263}
264
265/**
266 * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
267 */
268static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
269 struct iwl_tx_queue *txq, u32 id)
270{
271 struct device *dev = &priv->pci_dev->dev;
272 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
273
274 /* Driver private data, only for Tx (not command) queues,
275 * not shared with device. */
276 if (id != priv->cmd_queue) {
277 txq->txb = kzalloc(sizeof(txq->txb[0]) *
278 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
279 if (!txq->txb) {
280 IWL_ERR(priv, "kmalloc for auxiliary BD "
281 "structures failed\n");
282 goto error;
283 }
284 } else {
285 txq->txb = NULL;
286 }
287
288 /* Circular buffer of transmit frame descriptors (TFDs),
289 * shared with device */
290 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
291 GFP_KERNEL);
292 if (!txq->tfds) {
293 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
294 goto error;
295 }
296 txq->q.id = id;
297
298 return 0;
299
300 error:
301 kfree(txq->txb);
302 txq->txb = NULL;
303
304 return -ENOMEM;
305}
306
307/**
308 * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
309 */
310int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
311 int slots_num, u32 txq_id)
312{
313 int i, len;
314 int ret;
315 int actual_slots = slots_num;
316
317 /*
318 * Alloc buffer array for commands (Tx or other types of commands).
319 * For the command queue (#4/#9), allocate command space plus one big
320 * command for scan, since the scan command is very large; the system
321 * will not have two scans at the same time, so only one is needed.
322 * For normal Tx queues (all other queues), no super-size command
323 * space is needed.
324 */
325 if (txq_id == priv->cmd_queue)
326 actual_slots++;
327
328 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
329 GFP_KERNEL);
330 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
331 GFP_KERNEL);
332
333 if (!txq->meta || !txq->cmd)
334 goto out_free_arrays;
335
336 len = sizeof(struct iwl_device_cmd);
337 for (i = 0; i < actual_slots; i++) {
338 /* only happens for cmd queue */
339 if (i == slots_num)
340 len = IWL_MAX_CMD_SIZE;
341
342 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
343 if (!txq->cmd[i])
344 goto err;
345 }
346
347 /* Alloc driver data array and TFD circular buffer */
348 ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
349 if (ret)
350 goto err;
351
352 txq->need_update = 0;
353
354 /*
355 * For the default queues 0-3, set up the swq_id
356 * already -- all others need to get one later
357 * (if they need one at all).
358 */
359 if (txq_id < 4)
360 iwl_legacy_set_swq_id(txq, txq_id, txq_id);
361
362 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
363 * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
364 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
365
366 /* Initialize queue's high/low-water marks, and head/tail indexes */
367 iwl_legacy_queue_init(priv, &txq->q,
368 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
369
370 /* Tell device where to find queue */
371 priv->cfg->ops->lib->txq_init(priv, txq);
372
373 return 0;
374err:
375 for (i = 0; i < actual_slots; i++)
376 kfree(txq->cmd[i]);
377out_free_arrays:
378 kfree(txq->meta);
379 kfree(txq->cmd);
380
381 return -ENOMEM;
382}
383EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
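For illustration (not part of the patch), the per-slot buffer layout that iwl_legacy_tx_queue_init() ends up with for the command queue, assuming TFD_CMD_SLOTS is 32:

/*
 * Command queue (txq_id == priv->cmd_queue), actual_slots = 33:
 *   txq->cmd[0] .. txq->cmd[31]   sizeof(struct iwl_device_cmd) each
 *   txq->cmd[32]                  IWL_MAX_CMD_SIZE ("huge" slot for the
 *                                 oversized scan command)
 *
 * Data queues keep actual_slots == slots_num and allocate only
 * normally sized command buffers.
 */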
384
385void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
386 int slots_num, u32 txq_id)
387{
388 int actual_slots = slots_num;
389
390 if (txq_id == priv->cmd_queue)
391 actual_slots++;
392
393 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
394
395 txq->need_update = 0;
396
397 /* Initialize queue's high/low-water marks, and head/tail indexes */
398 iwl_legacy_queue_init(priv, &txq->q,
399 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
400
401 /* Tell device where to find queue */
402 priv->cfg->ops->lib->txq_init(priv, txq);
403}
404EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
405
406/*************** HOST COMMAND QUEUE FUNCTIONS *****/
407
408/**
409 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
410 * @priv: device private data pointer
411 * @cmd: pointer to the uCode command structure
412 *
413 * The function returns a negative value to indicate failure.
414 * On success, it returns the (non-negative) index of the command
415 * in the command queue.
416 */
417int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
418{
419 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
420 struct iwl_queue *q = &txq->q;
421 struct iwl_device_cmd *out_cmd;
422 struct iwl_cmd_meta *out_meta;
423 dma_addr_t phys_addr;
424 unsigned long flags;
425 int len;
426 u32 idx;
427 u16 fix_size;
428
429 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
430 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
431
432 /* If any of the command structures ends up being larger than
433 * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
434 * we will need to increase the size of the TFD entries.
435 * Also check that the command buffer does not exceed the sizes
436 * of device_cmd and max_cmd_size. */
437 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
438 !(cmd->flags & CMD_SIZE_HUGE));
439 BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
440
441 if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
442 IWL_WARN(priv, "Not sending command - %s KILL\n",
443 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
444 return -EIO;
445 }
446
447 if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
448 IWL_ERR(priv, "No space in command queue\n");
449 IWL_ERR(priv, "Restarting adapter due to queue full\n");
450 queue_work(priv->workqueue, &priv->restart);
451 return -ENOSPC;
452 }
453
454 spin_lock_irqsave(&priv->hcmd_lock, flags);
455
456 /* If this is a huge cmd, mark the huge flag also on the meta.flags
457 * of the _original_ cmd. This is used for DMA mapping clean up.
458 */
459 if (cmd->flags & CMD_SIZE_HUGE) {
460 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
461 txq->meta[idx].flags = CMD_SIZE_HUGE;
462 }
463
464 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
465 out_cmd = txq->cmd[idx];
466 out_meta = &txq->meta[idx];
467
468 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
469 out_meta->flags = cmd->flags;
470 if (cmd->flags & CMD_WANT_SKB)
471 out_meta->source = cmd;
472 if (cmd->flags & CMD_ASYNC)
473 out_meta->callback = cmd->callback;
474
475 out_cmd->hdr.cmd = cmd->id;
476 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
477
478 /* At this point, the out_cmd now has all of the incoming cmd
479 * information */
480
481 out_cmd->hdr.flags = 0;
482 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
483 INDEX_TO_SEQ(q->write_ptr));
484 if (cmd->flags & CMD_SIZE_HUGE)
485 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
486 len = sizeof(struct iwl_device_cmd);
487 if (idx == TFD_CMD_SLOTS)
488 len = IWL_MAX_CMD_SIZE;
489
490#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
491 switch (out_cmd->hdr.cmd) {
492 case REPLY_TX_LINK_QUALITY_CMD:
493 case SENSITIVITY_CMD:
494 IWL_DEBUG_HC_DUMP(priv,
495 "Sending command %s (#%x), seq: 0x%04X, "
496 "%d bytes at %d[%d]:%d\n",
497 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
498 out_cmd->hdr.cmd,
499 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
500 q->write_ptr, idx, priv->cmd_queue);
501 break;
502 default:
503 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
504 "%d bytes at %d[%d]:%d\n",
505 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
506 out_cmd->hdr.cmd,
507 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
508 q->write_ptr, idx, priv->cmd_queue);
509 }
510#endif
511 txq->need_update = 1;
512
513 if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
514 /* Set up entry in queue's byte count circular buffer */
515 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
516
517 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
518 fix_size, PCI_DMA_BIDIRECTIONAL);
519 dma_unmap_addr_set(out_meta, mapping, phys_addr);
520 dma_unmap_len_set(out_meta, len, fix_size);
521
522 trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
523 fix_size, cmd->flags);
524
525 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
526 phys_addr, fix_size, 1,
527 U32_PAD(cmd->len));
528
529 /* Increment and update queue's write index */
530 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
531 iwl_legacy_txq_update_write_ptr(priv, txq);
532
533 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
534 return idx;
535}
536
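A brief illustration (not part of the patch) of how the sequence field written in iwl_legacy_enqueue_hcmd() is consumed on completion, assuming the usual QUEUE_TO_SEQ/INDEX_TO_SEQ bit layout:

/*
 * Encode (enqueue):   seq = QUEUE_TO_SEQ(priv->cmd_queue) |
 *                           INDEX_TO_SEQ(q->write_ptr);
 * Decode (complete):  SEQ_TO_QUEUE(seq) must equal priv->cmd_queue,
 *                     SEQ_TO_INDEX(seq) selects the slot to reclaim.
 *
 * SEQ_HUGE_FRAME is OR'd into the sequence for oversized commands so
 * that iwl_legacy_tx_cmd_complete() picks the extra "huge" slot via
 * iwl_legacy_get_cmd_index(..., huge).
 */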
537/**
538 * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
539 *
540 * When the FW advances the 'R' index, all entries between the old and new
541 * 'R' index need to be reclaimed. As a result, some free space forms. If
542 * there is enough free space (> low mark), wake the stack that feeds us.
543 */
544static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
545 int idx, int cmd_idx)
546{
547 struct iwl_tx_queue *txq = &priv->txq[txq_id];
548 struct iwl_queue *q = &txq->q;
549 int nfreed = 0;
550
551 if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
552 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
553 "is out of range [0-%d] %d %d.\n", txq_id,
554 idx, q->n_bd, q->write_ptr, q->read_ptr);
555 return;
556 }
557
558 for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
559 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
560
561 if (nfreed++ > 0) {
562 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
563 q->write_ptr, q->read_ptr);
564 queue_work(priv->workqueue, &priv->restart);
565 }
566
567 }
568}
569
570/**
571 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
572 * @rxb: Rx buffer to reclaim
573 *
574 * If an Rx buffer has an async callback associated with it, the callback
575 * will be executed. The attached skb (if present) will only be freed
576 * if the callback returns 1
577 */
578void
579iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
580{
581 struct iwl_rx_packet *pkt = rxb_addr(rxb);
582 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
583 int txq_id = SEQ_TO_QUEUE(sequence);
584 int index = SEQ_TO_INDEX(sequence);
585 int cmd_index;
586 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
587 struct iwl_device_cmd *cmd;
588 struct iwl_cmd_meta *meta;
589 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
590
591 /* If a Tx command is being handled and it isn't in the actual
592 * command queue, then a command routing bug has been introduced
593 * in the queue management code. */
594 if (WARN(txq_id != priv->cmd_queue,
595 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
596 txq_id, priv->cmd_queue, sequence,
597 priv->txq[priv->cmd_queue].q.read_ptr,
598 priv->txq[priv->cmd_queue].q.write_ptr)) {
599 iwl_print_hex_error(priv, pkt, 32);
600 return;
601 }
602
603 /* If this is a huge cmd, clear the huge flag on the meta.flags
604 * of the _original_ cmd so that iwl_legacy_cmd_queue_free won't unmap
605 * the DMA buffer for the scan (huge) command.
606 */
607 if (huge) {
608 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
609 txq->meta[cmd_index].flags = 0;
610 }
611 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
612 cmd = txq->cmd[cmd_index];
613 meta = &txq->meta[cmd_index];
614
615 pci_unmap_single(priv->pci_dev,
616 dma_unmap_addr(meta, mapping),
617 dma_unmap_len(meta, len),
618 PCI_DMA_BIDIRECTIONAL);
619
620 /* Input error checking is done when commands are added to queue. */
621 if (meta->flags & CMD_WANT_SKB) {
622 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
623 rxb->page = NULL;
624 } else if (meta->callback)
625 meta->callback(priv, cmd, pkt);
626
627 iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
628
629 if (!(meta->flags & CMD_ASYNC)) {
630 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
631 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
632 iwl_legacy_get_cmd_string(cmd->hdr.cmd));
633 wake_up_interruptible(&priv->wait_command_queue);
634 }
635 meta->flags = 0;
636}
637EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index adcef735180a..ef94d161b783 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -61,7 +61,6 @@
61#include "iwl-helpers.h" 61#include "iwl-helpers.h"
62#include "iwl-dev.h" 62#include "iwl-dev.h"
63#include "iwl-spectrum.h" 63#include "iwl-spectrum.h"
64#include "iwl-legacy.h"
65 64
66/* 65/*
67 * module name, copyright, version, etc. 66 * module name, copyright, version, etc.
@@ -70,7 +69,7 @@
70#define DRV_DESCRIPTION \ 69#define DRV_DESCRIPTION \
71"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" 70"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
72 71
73#ifdef CONFIG_IWLWIFI_DEBUG 72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
74#define VD "d" 73#define VD "d"
75#else 74#else
76#define VD 75#define VD
@@ -82,7 +81,7 @@
82 * this was configurable. 81 * this was configurable.
83 */ 82 */
84#define DRV_VERSION IWLWIFI_VERSION VD "s" 83#define DRV_VERSION IWLWIFI_VERSION VD "s"
85#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation" 84#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
86#define DRV_AUTHOR "<ilw@linux.intel.com>" 85#define DRV_AUTHOR "<ilw@linux.intel.com>"
87 86
88MODULE_DESCRIPTION(DRV_DESCRIPTION); 87MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -164,7 +163,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
164 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) 163 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
165 == STA_KEY_FLG_NO_ENC) 164 == STA_KEY_FLG_NO_ENC)
166 priv->stations[sta_id].sta.key.key_offset = 165 priv->stations[sta_id].sta.key.key_offset =
167 iwl_get_free_ucode_key_index(priv); 166 iwl_legacy_get_free_ucode_key_index(priv);
168 /* else, we are overriding an existing key => no need to allocated room 167 /* else, we are overriding an existing key => no need to allocated room
169 * in uCode. */ 168 * in uCode. */
170 169
@@ -177,7 +176,8 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
177 176
178 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); 177 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
179 178
180 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 179 ret = iwl_legacy_send_add_sta(priv,
180 &priv->stations[sta_id].sta, CMD_ASYNC);
181 181
182 spin_unlock_irqrestore(&priv->sta_lock, flags); 182 spin_unlock_irqrestore(&priv->sta_lock, flags);
183 183
@@ -201,7 +201,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
201static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) 201static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
202{ 202{
203 unsigned long flags; 203 unsigned long flags;
204 struct iwl_addsta_cmd sta_cmd; 204 struct iwl_legacy_addsta_cmd sta_cmd;
205 205
206 spin_lock_irqsave(&priv->sta_lock, flags); 206 spin_lock_irqsave(&priv->sta_lock, flags);
207 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); 207 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
@@ -210,11 +210,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
210 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 210 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
211 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 211 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
212 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 212 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
213 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 213 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
214 spin_unlock_irqrestore(&priv->sta_lock, flags); 214 spin_unlock_irqrestore(&priv->sta_lock, flags);
215 215
216 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); 216 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
217 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 217 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
218} 218}
219 219
220static int iwl3945_set_dynamic_key(struct iwl_priv *priv, 220static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
@@ -318,7 +318,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
318 int left) 318 int left)
319{ 319{
320 320
321 if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) 321 if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
322 return 0; 322 return 0;
323 323
324 if (priv->beacon_skb->len > left) 324 if (priv->beacon_skb->len > left)
@@ -344,12 +344,12 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
344 return -ENOMEM; 344 return -ENOMEM;
345 } 345 }
346 346
347 rate = iwl_rate_get_lowest_plcp(priv, 347 rate = iwl_legacy_get_lowest_plcp(priv,
348 &priv->contexts[IWL_RXON_CTX_BSS]); 348 &priv->contexts[IWL_RXON_CTX_BSS]);
349 349
350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); 350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
351 351
352 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 352 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
353 &frame->u.cmd[0]); 353 &frame->u.cmd[0]);
354 354
355 iwl3945_free_frame(priv, frame); 355 iwl3945_free_frame(priv, frame);
@@ -443,7 +443,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
444 } 444 }
445 445
446 priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags); 446 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
447 447
448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
449 if (ieee80211_is_mgmt(fc)) { 449 if (ieee80211_is_mgmt(fc)) {
@@ -485,7 +485,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
485 unsigned long flags; 485 unsigned long flags;
486 486
487 spin_lock_irqsave(&priv->lock, flags); 487 spin_lock_irqsave(&priv->lock, flags);
488 if (iwl_is_rfkill(priv)) { 488 if (iwl_legacy_is_rfkill(priv)) {
489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); 489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
490 goto drop_unlock; 490 goto drop_unlock;
491 } 491 }
@@ -500,7 +500,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
500 500
501 fc = hdr->frame_control; 501 fc = hdr->frame_control;
502 502
503#ifdef CONFIG_IWLWIFI_DEBUG 503#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
504 if (ieee80211_is_auth(fc)) 504 if (ieee80211_is_auth(fc))
505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); 505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
506 else if (ieee80211_is_assoc_req(fc)) 506 else if (ieee80211_is_assoc_req(fc))
@@ -514,7 +514,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
514 hdr_len = ieee80211_hdrlen(fc); 514 hdr_len = ieee80211_hdrlen(fc);
515 515
516 /* Find index into station table for destination station */ 516 /* Find index into station table for destination station */
517 sta_id = iwl_sta_id_or_broadcast( 517 sta_id = iwl_legacy_sta_id_or_broadcast(
518 priv, &priv->contexts[IWL_RXON_CTX_BSS], 518 priv, &priv->contexts[IWL_RXON_CTX_BSS],
519 info->control.sta); 519 info->control.sta);
520 if (sta_id == IWL_INVALID_STATION) { 520 if (sta_id == IWL_INVALID_STATION) {
@@ -536,12 +536,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
536 txq = &priv->txq[txq_id]; 536 txq = &priv->txq[txq_id];
537 q = &txq->q; 537 q = &txq->q;
538 538
539 if ((iwl_queue_space(q) < q->high_mark)) 539 if ((iwl_legacy_queue_space(q) < q->high_mark))
540 goto drop; 540 goto drop;
541 541
542 spin_lock_irqsave(&priv->lock, flags); 542 spin_lock_irqsave(&priv->lock, flags);
543 543
544 idx = get_cmd_index(q, q->write_ptr, 0); 544 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
545 545
546 /* Set up driver data for this TFD */ 546 /* Set up driver data for this TFD */
547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -582,8 +582,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
582 len = (u16)skb->len; 582 len = (u16)skb->len;
583 tx_cmd->len = cpu_to_le16(len); 583 tx_cmd->len = cpu_to_le16(len);
584 584
585 iwl_dbg_log_tx_data_frame(priv, len, hdr); 585 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
586 iwl_update_stats(priv, true, fc, len); 586 iwl_legacy_update_stats(priv, true, fc, len);
587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
589 589
@@ -642,20 +642,20 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
642 642
643 643
644 /* Tell device the write index *just past* this latest filled TFD */ 644 /* Tell device the write index *just past* this latest filled TFD */
645 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 645 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
646 iwl_txq_update_write_ptr(priv, txq); 646 iwl_legacy_txq_update_write_ptr(priv, txq);
647 spin_unlock_irqrestore(&priv->lock, flags); 647 spin_unlock_irqrestore(&priv->lock, flags);
648 648
649 if ((iwl_queue_space(q) < q->high_mark) 649 if ((iwl_legacy_queue_space(q) < q->high_mark)
650 && priv->mac80211_registered) { 650 && priv->mac80211_registered) {
651 if (wait_write_ptr) { 651 if (wait_write_ptr) {
652 spin_lock_irqsave(&priv->lock, flags); 652 spin_lock_irqsave(&priv->lock, flags);
653 txq->need_update = 1; 653 txq->need_update = 1;
654 iwl_txq_update_write_ptr(priv, txq); 654 iwl_legacy_txq_update_write_ptr(priv, txq);
655 spin_unlock_irqrestore(&priv->lock, flags); 655 spin_unlock_irqrestore(&priv->lock, flags);
656 } 656 }
657 657
658 iwl_stop_queue(priv, txq); 658 iwl_legacy_stop_queue(priv, txq);
659 } 659 }
660 660
661 return 0; 661 return 0;
@@ -683,8 +683,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
683 int duration = le16_to_cpu(params->duration); 683 int duration = le16_to_cpu(params->duration);
684 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 684 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
685 685
686 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) 686 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
687 add_time = iwl_usecs_to_beacons(priv, 687 add_time = iwl_legacy_usecs_to_beacons(priv,
688 le64_to_cpu(params->start_time) - priv->_3945.last_tsf, 688 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
689 le16_to_cpu(ctx->timing.beacon_interval)); 689 le16_to_cpu(ctx->timing.beacon_interval));
690 690
@@ -697,9 +697,9 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
697 cmd.len = sizeof(spectrum); 697 cmd.len = sizeof(spectrum);
698 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 698 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
699 699
700 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) 700 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
701 spectrum.start_time = 701 spectrum.start_time =
702 iwl_add_beacon_time(priv, 702 iwl_legacy_add_beacon_time(priv,
703 priv->_3945.last_beacon_time, add_time, 703 priv->_3945.last_beacon_time, add_time,
704 le16_to_cpu(ctx->timing.beacon_interval)); 704 le16_to_cpu(ctx->timing.beacon_interval));
705 else 705 else
@@ -712,7 +712,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
712 spectrum.flags |= RXON_FLG_BAND_24G_MSK | 712 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
713 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; 713 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
714 714
715 rc = iwl_send_cmd_sync(priv, &cmd); 715 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
716 if (rc) 716 if (rc)
717 return rc; 717 return rc;
718 718
@@ -739,7 +739,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
739 break; 739 break;
740 } 740 }
741 741
742 iwl_free_pages(priv, cmd.reply_page); 742 iwl_legacy_free_pages(priv, cmd.reply_page);
743 743
744 return rc; 744 return rc;
745} 745}
@@ -783,45 +783,19 @@ static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
783static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, 783static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
784 struct iwl_rx_mem_buffer *rxb) 784 struct iwl_rx_mem_buffer *rxb)
785{ 785{
786#ifdef CONFIG_IWLWIFI_DEBUG 786#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
787 struct iwl_rx_packet *pkt = rxb_addr(rxb); 787 struct iwl_rx_packet *pkt = rxb_addr(rxb);
788#endif 788#endif
789 789
790 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 790 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
791} 791}
792 792
793static void iwl3945_bg_beacon_update(struct work_struct *work)
794{
795 struct iwl_priv *priv =
796 container_of(work, struct iwl_priv, beacon_update);
797 struct sk_buff *beacon;
798
799 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
800 beacon = ieee80211_beacon_get(priv->hw,
801 priv->contexts[IWL_RXON_CTX_BSS].vif);
802
803 if (!beacon) {
804 IWL_ERR(priv, "update beacon failed\n");
805 return;
806 }
807
808 mutex_lock(&priv->mutex);
809 /* new beacon skb is allocated every time; dispose previous.*/
810 if (priv->beacon_skb)
811 dev_kfree_skb(priv->beacon_skb);
812
813 priv->beacon_skb = beacon;
814 mutex_unlock(&priv->mutex);
815
816 iwl3945_send_beacon_cmd(priv);
817}
818
819static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, 793static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
820 struct iwl_rx_mem_buffer *rxb) 794 struct iwl_rx_mem_buffer *rxb)
821{ 795{
822 struct iwl_rx_packet *pkt = rxb_addr(rxb); 796 struct iwl_rx_packet *pkt = rxb_addr(rxb);
823 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 797 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
824#ifdef CONFIG_IWLWIFI_DEBUG 798#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
825 u8 rate = beacon->beacon_notify_hdr.rate; 799 u8 rate = beacon->beacon_notify_hdr.rate;
826 800
827 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " 801 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -835,9 +809,6 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
835 809
836 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 810 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
837 811
838 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
839 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
840 queue_work(priv->workqueue, &priv->beacon_update);
841} 812}
842 813
843/* Handle notification from uCode that card's power state is changing 814/* Handle notification from uCode that card's power state is changing
@@ -862,7 +833,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
862 clear_bit(STATUS_RF_KILL_HW, &priv->status); 833 clear_bit(STATUS_RF_KILL_HW, &priv->status);
863 834
864 835
865 iwl_scan_cancel(priv); 836 iwl_legacy_scan_cancel(priv);
866 837
867 if ((test_bit(STATUS_RF_KILL_HW, &status) != 838 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
868 test_bit(STATUS_RF_KILL_HW, &priv->status))) 839 test_bit(STATUS_RF_KILL_HW, &priv->status)))
@@ -885,13 +856,13 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
885{ 856{
886 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; 857 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
887 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; 858 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
888 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; 859 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
889 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 860 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
890 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 861 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
891 iwl_rx_spectrum_measure_notif; 862 iwl_legacy_rx_spectrum_measure_notif;
892 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 863 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
893 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 864 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
894 iwl_rx_pm_debug_statistics_notif; 865 iwl_legacy_rx_pm_debug_statistics_notif;
895 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; 866 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
896 867
897 /* 868 /*
@@ -902,7 +873,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
902 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; 873 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
903 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; 874 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
904 875
905 iwl_setup_rx_scan_handlers(priv); 876 iwl_legacy_setup_rx_scan_handlers(priv);
906 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; 877 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
907 878
908 /* Set up hardware specific Rx handlers */ 879 /* Set up hardware specific Rx handlers */
@@ -1003,7 +974,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
1003 974
1004 spin_lock_irqsave(&rxq->lock, flags); 975 spin_lock_irqsave(&rxq->lock, flags);
1005 write = rxq->write & ~0x7; 976 write = rxq->write & ~0x7;
1006 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 977 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
1007 /* Get next free Rx buffer, remove from free list */ 978 /* Get next free Rx buffer, remove from free list */
1008 element = rxq->rx_free.next; 979 element = rxq->rx_free.next;
1009 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 980 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
@@ -1029,7 +1000,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
1029 spin_lock_irqsave(&rxq->lock, flags); 1000 spin_lock_irqsave(&rxq->lock, flags);
1030 rxq->need_update = 1; 1001 rxq->need_update = 1;
1031 spin_unlock_irqrestore(&rxq->lock, flags); 1002 spin_unlock_irqrestore(&rxq->lock, flags);
1032 iwl_rx_queue_update_write_ptr(priv, rxq); 1003 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1033 } 1004 }
1034} 1005}
1035 1006
@@ -1123,7 +1094,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1123 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1094 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1124 PAGE_SIZE << priv->hw_params.rx_page_order, 1095 PAGE_SIZE << priv->hw_params.rx_page_order,
1125 PCI_DMA_FROMDEVICE); 1096 PCI_DMA_FROMDEVICE);
1126 __iwl_free_pages(priv, rxq->pool[i].page); 1097 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1127 rxq->pool[i].page = NULL; 1098 rxq->pool[i].page = NULL;
1128 } 1099 }
1129 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1100 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1170,7 +1141,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1170 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1141 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1171 PAGE_SIZE << priv->hw_params.rx_page_order, 1142 PAGE_SIZE << priv->hw_params.rx_page_order,
1172 PCI_DMA_FROMDEVICE); 1143 PCI_DMA_FROMDEVICE);
1173 __iwl_free_pages(priv, rxq->pool[i].page); 1144 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1174 rxq->pool[i].page = NULL; 1145 rxq->pool[i].page = NULL;
1175 } 1146 }
1176 } 1147 }
@@ -1275,7 +1246,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1275 1246
1276 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 1247 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1277 len += sizeof(u32); /* account for status word */ 1248 len += sizeof(u32); /* account for status word */
1278 trace_iwlwifi_dev_rx(priv, pkt, len); 1249 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
1279 1250
1280 /* Reclaim a command buffer only if this packet is a response 1251 /* Reclaim a command buffer only if this packet is a response
1281 * to a (driver-originated) command. 1252 * to a (driver-originated) command.
@@ -1292,14 +1263,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1292 * rx_handlers table. See iwl3945_setup_rx_handlers() */ 1263 * rx_handlers table. See iwl3945_setup_rx_handlers() */
1293 if (priv->rx_handlers[pkt->hdr.cmd]) { 1264 if (priv->rx_handlers[pkt->hdr.cmd]) {
1294 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, 1265 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1295 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1266 iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1296 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1267 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1297 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 1268 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1298 } else { 1269 } else {
1299 /* No handling needed */ 1270 /* No handling needed */
1300 IWL_DEBUG_RX(priv, 1271 IWL_DEBUG_RX(priv,
1301 "r %d i %d No handler needed for %s, 0x%02x\n", 1272 "r %d i %d No handler needed for %s, 0x%02x\n",
1302 r, i, get_cmd_string(pkt->hdr.cmd), 1273 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
1303 pkt->hdr.cmd); 1274 pkt->hdr.cmd);
1304 } 1275 }
1305 1276
@@ -1312,10 +1283,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1312 1283
1313 if (reclaim) { 1284 if (reclaim) {
1314 /* Invoke any callbacks, transfer the buffer to caller, 1285 /* Invoke any callbacks, transfer the buffer to caller,
1315 * and fire off the (possibly) blocking iwl_send_cmd() 1286 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
1316 * as we reclaim the driver command queue */ 1287 * as we reclaim the driver command queue */
1317 if (rxb->page) 1288 if (rxb->page)
1318 iwl_tx_cmd_complete(priv, rxb); 1289 iwl_legacy_tx_cmd_complete(priv, rxb);
1319 else 1290 else
1320 IWL_WARN(priv, "Claim null rxb?\n"); 1291 IWL_WARN(priv, "Claim null rxb?\n");
1321 } 1292 }
@@ -1357,14 +1328,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1357} 1328}
1358 1329
1359/* call this function to flush any scheduled tasklet */ 1330/* call this function to flush any scheduled tasklet */
1360static inline void iwl_synchronize_irq(struct iwl_priv *priv) 1331static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
1361{ 1332{
1362 /* wait to make sure we flush pending tasklet*/ 1333 /* wait to make sure we flush pending tasklet*/
1363 synchronize_irq(priv->pci_dev->irq); 1334 synchronize_irq(priv->pci_dev->irq);
1364 tasklet_kill(&priv->irq_tasklet); 1335 tasklet_kill(&priv->irq_tasklet);
1365} 1336}
1366 1337
1367static const char *desc_lookup(int i) 1338static const char *iwl3945_desc_lookup(int i)
1368{ 1339{
1369 switch (i) { 1340 switch (i) {
1370 case 1: 1341 case 1:
@@ -1401,7 +1372,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1401 } 1372 }
1402 1373
1403 1374
1404 count = iwl_read_targ_mem(priv, base); 1375 count = iwl_legacy_read_targ_mem(priv, base);
1405 1376
1406 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 1377 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1407 IWL_ERR(priv, "Start IWL Error Log Dump:\n"); 1378 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
@@ -1414,25 +1385,25 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1414 for (i = ERROR_START_OFFSET; 1385 for (i = ERROR_START_OFFSET;
1415 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; 1386 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1416 i += ERROR_ELEM_SIZE) { 1387 i += ERROR_ELEM_SIZE) {
1417 desc = iwl_read_targ_mem(priv, base + i); 1388 desc = iwl_legacy_read_targ_mem(priv, base + i);
1418 time = 1389 time =
1419 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32)); 1390 iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
1420 blink1 = 1391 blink1 =
1421 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32)); 1392 iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
1422 blink2 = 1393 blink2 =
1423 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32)); 1394 iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
1424 ilink1 = 1395 ilink1 =
1425 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32)); 1396 iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
1426 ilink2 = 1397 ilink2 =
1427 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32)); 1398 iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
1428 data1 = 1399 data1 =
1429 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32)); 1400 iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
1430 1401
1431 IWL_ERR(priv, 1402 IWL_ERR(priv,
1432 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1403 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1433 desc_lookup(desc), desc, time, blink1, blink2, 1404 iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
1434 ilink1, ilink2, data1); 1405 ilink1, ilink2, data1);
1435 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0, 1406 trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
1436 0, blink1, blink2, ilink1, ilink2); 1407 0, blink1, blink2, ilink1, ilink2);
1437 } 1408 }
1438} 1409}
@@ -1471,14 +1442,14 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1471 iwl_grab_nic_access(priv); 1442 iwl_grab_nic_access(priv);
1472 1443
1473 /* Set starting address; reads will auto-increment */ 1444 /* Set starting address; reads will auto-increment */
1474 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); 1445 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1475 rmb(); 1446 rmb();
1476 1447
1477 /* "time" is actually "data" for mode 0 (no timestamp). 1448 /* "time" is actually "data" for mode 0 (no timestamp).
1478 * place event id # at far right for easier visual parsing. */ 1449 * place event id # at far right for easier visual parsing. */
1479 for (i = 0; i < num_events; i++) { 1450 for (i = 0; i < num_events; i++) {
1480 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1451 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1481 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1452 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1482 if (mode == 0) { 1453 if (mode == 0) {
1483 /* data, ev */ 1454 /* data, ev */
1484 if (bufsz) { 1455 if (bufsz) {
@@ -1487,11 +1458,12 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1487 time, ev); 1458 time, ev);
1488 } else { 1459 } else {
1489 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1460 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1490 trace_iwlwifi_dev_ucode_event(priv, 0, 1461 trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
1491 time, ev); 1462 time, ev);
1492 } 1463 }
1493 } else { 1464 } else {
1494 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1465 data = _iwl_legacy_read_direct32(priv,
1466 HBUS_TARG_MEM_RDAT);
1495 if (bufsz) { 1467 if (bufsz) {
1496 pos += scnprintf(*buf + pos, bufsz - pos, 1468 pos += scnprintf(*buf + pos, bufsz - pos,
1497 "%010u:0x%08x:%04u\n", 1469 "%010u:0x%08x:%04u\n",
@@ -1499,7 +1471,7 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1499 } else { 1471 } else {
1500 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", 1472 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
1501 time, data, ev); 1473 time, data, ev);
1502 trace_iwlwifi_dev_ucode_event(priv, time, 1474 trace_iwlwifi_legacy_dev_ucode_event(priv, time,
1503 data, ev); 1475 data, ev);
1504 } 1476 }
1505 } 1477 }
@@ -1570,10 +1542,10 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1570 } 1542 }
1571 1543
1572 /* event log header */ 1544 /* event log header */
1573 capacity = iwl_read_targ_mem(priv, base); 1545 capacity = iwl_legacy_read_targ_mem(priv, base);
1574 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); 1546 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
1575 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1547 num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
1576 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1548 next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
1577 1549
1578 if (capacity > priv->cfg->base_params->max_event_log_size) { 1550 if (capacity > priv->cfg->base_params->max_event_log_size) {
1579 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 1551 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
@@ -1595,8 +1567,8 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1595 return pos; 1567 return pos;
1596 } 1568 }
1597 1569
1598#ifdef CONFIG_IWLWIFI_DEBUG 1570#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1599 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) 1571 if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1600 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) 1572 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1601 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; 1573 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1602#else 1574#else
@@ -1607,7 +1579,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1607 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n", 1579 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
1608 size); 1580 size);
1609 1581
1610#ifdef CONFIG_IWLWIFI_DEBUG 1582#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1611 if (display) { 1583 if (display) {
1612 if (full_log) 1584 if (full_log)
1613 bufsz = capacity * 48; 1585 bufsz = capacity * 48;
@@ -1617,7 +1589,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1617 if (!*buf) 1589 if (!*buf)
1618 return -ENOMEM; 1590 return -ENOMEM;
1619 } 1591 }
1620 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 1592 if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1621 /* if uCode has wrapped back to top of log, 1593 /* if uCode has wrapped back to top of log,
1622 * start at the oldest entry, 1594 * start at the oldest entry,
1623 * i.e the next one that uCode would fill. 1595 * i.e the next one that uCode would fill.
@@ -1647,7 +1619,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1647 u32 inta, handled = 0; 1619 u32 inta, handled = 0;
1648 u32 inta_fh; 1620 u32 inta_fh;
1649 unsigned long flags; 1621 unsigned long flags;
1650#ifdef CONFIG_IWLWIFI_DEBUG 1622#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1651 u32 inta_mask; 1623 u32 inta_mask;
1652#endif 1624#endif
1653 1625
@@ -1665,8 +1637,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1665 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1637 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1666 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); 1638 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
1667 1639
1668#ifdef CONFIG_IWLWIFI_DEBUG 1640#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1669 if (iwl_get_debug_level(priv) & IWL_DL_ISR) { 1641 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
1670 /* just for debug */ 1642 /* just for debug */
1671 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1643 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1672 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 1644 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -1690,18 +1662,18 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1690 IWL_ERR(priv, "Hardware error detected. Restarting.\n"); 1662 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
1691 1663
1692 /* Tell the device to stop sending interrupts */ 1664 /* Tell the device to stop sending interrupts */
1693 iwl_disable_interrupts(priv); 1665 iwl_legacy_disable_interrupts(priv);
1694 1666
1695 priv->isr_stats.hw++; 1667 priv->isr_stats.hw++;
1696 iwl_irq_handle_error(priv); 1668 iwl_legacy_irq_handle_error(priv);
1697 1669
1698 handled |= CSR_INT_BIT_HW_ERR; 1670 handled |= CSR_INT_BIT_HW_ERR;
1699 1671
1700 return; 1672 return;
1701 } 1673 }
1702 1674
1703#ifdef CONFIG_IWLWIFI_DEBUG 1675#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1704 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1676 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1705 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1677 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1706 if (inta & CSR_INT_BIT_SCD) { 1678 if (inta & CSR_INT_BIT_SCD) {
1707 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " 1679 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
@@ -1724,20 +1696,20 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1724 IWL_ERR(priv, "Microcode SW error detected. " 1696 IWL_ERR(priv, "Microcode SW error detected. "
1725 "Restarting 0x%X.\n", inta); 1697 "Restarting 0x%X.\n", inta);
1726 priv->isr_stats.sw++; 1698 priv->isr_stats.sw++;
1727 iwl_irq_handle_error(priv); 1699 iwl_legacy_irq_handle_error(priv);
1728 handled |= CSR_INT_BIT_SW_ERR; 1700 handled |= CSR_INT_BIT_SW_ERR;
1729 } 1701 }
1730 1702
1731 /* uCode wakes up after power-down sleep */ 1703 /* uCode wakes up after power-down sleep */
1732 if (inta & CSR_INT_BIT_WAKEUP) { 1704 if (inta & CSR_INT_BIT_WAKEUP) {
1733 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1705 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1734 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1706 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1735 iwl_txq_update_write_ptr(priv, &priv->txq[0]); 1707 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
1736 iwl_txq_update_write_ptr(priv, &priv->txq[1]); 1708 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
1737 iwl_txq_update_write_ptr(priv, &priv->txq[2]); 1709 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
1738 iwl_txq_update_write_ptr(priv, &priv->txq[3]); 1710 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
1739 iwl_txq_update_write_ptr(priv, &priv->txq[4]); 1711 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
1740 iwl_txq_update_write_ptr(priv, &priv->txq[5]); 1712 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
1741 1713
1742 priv->isr_stats.wakeup++; 1714 priv->isr_stats.wakeup++;
1743 handled |= CSR_INT_BIT_WAKEUP; 1715 handled |= CSR_INT_BIT_WAKEUP;
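
Editor's note: the wakeup branch re-syncs the RX queue plus the six 3945 TX queues, one call per queue; the unrolled calls are equivalent in shape to a loop over the queue array. A toy stand-alone illustration of that shape follows; the queue count of six is simply read off the unrolled calls above, nothing else is implied.

#include <stdio.h>

#define N_TXQ 6	/* matches the six unrolled iwl_legacy_txq_update_write_ptr() calls */

struct fake_queue { int write_ptr; };

static void update_write_ptr(struct fake_queue *q)
{
	/* stand-in for telling the hardware where the driver last wrote */
	printf("sync write_ptr=%d\n", q->write_ptr);
}

int main(void)
{
	struct fake_queue txq[N_TXQ] = { {1}, {2}, {3}, {4}, {5}, {6} };

	for (int q = 0; q < N_TXQ; q++)
		update_write_ptr(&txq[q]);
	return 0;
}
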
@@ -1757,7 +1729,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1757 priv->isr_stats.tx++; 1729 priv->isr_stats.tx++;
1758 1730
1759 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); 1731 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
1760 iwl_write_direct32(priv, FH39_TCSR_CREDIT 1732 iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
1761 (FH39_SRVC_CHNL), 0x0); 1733 (FH39_SRVC_CHNL), 0x0);
1762 handled |= CSR_INT_BIT_FH_TX; 1734 handled |= CSR_INT_BIT_FH_TX;
1763 } 1735 }
@@ -1776,10 +1748,10 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1776 /* Re-enable all interrupts */ 1748 /* Re-enable all interrupts */
1777 /* only Re-enable if disabled by irq */ 1749 /* only Re-enable if disabled by irq */
1778 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1750 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1779 iwl_enable_interrupts(priv); 1751 iwl_legacy_enable_interrupts(priv);
1780 1752
1781#ifdef CONFIG_IWLWIFI_DEBUG 1753#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1782 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1754 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1783 inta = iwl_read32(priv, CSR_INT); 1755 inta = iwl_read32(priv, CSR_INT);
1784 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1756 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1785 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1757 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
@@ -1806,14 +1778,14 @@ static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
1806 return added; 1778 return added;
1807 } 1779 }
1808 1780
1809 active_dwell = iwl_get_active_dwell_time(priv, band, 0); 1781 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
1810 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1782 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1811 1783
1812 if (passive_dwell <= active_dwell) 1784 if (passive_dwell <= active_dwell)
1813 passive_dwell = active_dwell + 1; 1785 passive_dwell = active_dwell + 1;
1814 1786
1815 1787
1816 channel = iwl_get_single_channel_number(priv, band); 1788 channel = iwl_legacy_get_single_channel_number(priv, band);
1817 1789
1818 if (channel) { 1790 if (channel) {
1819 scan_ch->channel = channel; 1791 scan_ch->channel = channel;
@@ -1849,8 +1821,8 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1849 if (!sband) 1821 if (!sband)
1850 return 0; 1822 return 0;
1851 1823
1852 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); 1824 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
1853 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1825 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1854 1826
1855 if (passive_dwell <= active_dwell) 1827 if (passive_dwell <= active_dwell)
1856 passive_dwell = active_dwell + 1; 1828 passive_dwell = active_dwell + 1;
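
Editor's note: both scan-setup paths enforce the same invariant after fetching the dwell times: the passive dwell must stay strictly longer than the active dwell. A one-function stand-alone model of that clamp:

#include <stdio.h>

/* Model of the dwell-time clamp used in both scan-setup hunks above. */
static unsigned clamp_passive_dwell(unsigned active, unsigned passive)
{
	return (passive <= active) ? active + 1 : passive;
}

int main(void)
{
	printf("%u\n", clamp_passive_dwell(30, 20));	/* -> 31 */
	printf("%u\n", clamp_passive_dwell(30, 120));	/* -> 120 */
	return 0;
}
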
@@ -1863,10 +1835,12 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1863 1835
1864 scan_ch->channel = chan->hw_value; 1836 scan_ch->channel = chan->hw_value;
1865 1837
1866 ch_info = iwl_get_channel_info(priv, band, scan_ch->channel); 1838 ch_info = iwl_legacy_get_channel_info(priv, band,
1867 if (!is_channel_valid(ch_info)) { 1839 scan_ch->channel);
1868 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", 1840 if (!iwl_legacy_is_channel_valid(ch_info)) {
1869 scan_ch->channel); 1841 IWL_DEBUG_SCAN(priv,
1842 "Channel %d is INVALID for this band.\n",
1843 scan_ch->channel);
1870 continue; 1844 continue;
1871 } 1845 }
1872 1846
@@ -1875,7 +1849,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1875 /* If passive , set up for auto-switch 1849 /* If passive , set up for auto-switch
1876 * and use long active_dwell time. 1850 * and use long active_dwell time.
1877 */ 1851 */
1878 if (!is_active || is_channel_passive(ch_info) || 1852 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
1879 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { 1853 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1880 scan_ch->type = 0; /* passive */ 1854 scan_ch->type = 0; /* passive */
1881 if (IWL_UCODE_API(priv->ucode_ver) == 1) 1855 if (IWL_UCODE_API(priv->ucode_ver) == 1)
@@ -1955,12 +1929,12 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1955 1929
1956static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) 1930static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
1957{ 1931{
1958 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); 1932 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1959 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); 1933 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1960 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 1934 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1961 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); 1935 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1962 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); 1936 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1963 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); 1937 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1964} 1938}
1965 1939
1966/** 1940/**
@@ -1976,7 +1950,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
1976 1950
1977 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); 1951 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1978 1952
1979 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 1953 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1980 IWL39_RTC_INST_LOWER_BOUND); 1954 IWL39_RTC_INST_LOWER_BOUND);
1981 1955
1982 errcnt = 0; 1956 errcnt = 0;
@@ -1984,7 +1958,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
1984 /* read data comes through single port, auto-incr addr */ 1958 /* read data comes through single port, auto-incr addr */
1985 /* NOTE: Use the debugless read so we don't flood kernel log 1959 /* NOTE: Use the debugless read so we don't flood kernel log
1986 * if IWL_DL_IO is set */ 1960 * if IWL_DL_IO is set */
1987 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1961 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1988 if (val != le32_to_cpu(*image)) { 1962 if (val != le32_to_cpu(*image)) {
1989 IWL_ERR(priv, "uCode INST section is invalid at " 1963 IWL_ERR(priv, "uCode INST section is invalid at "
1990 "offset 0x%x, is 0x%x, s/b 0x%x\n", 1964 "offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -2023,9 +1997,9 @@ static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
2023 /* read data comes through single port, auto-incr addr */ 1997 /* read data comes through single port, auto-incr addr */
2024 /* NOTE: Use the debugless read so we don't flood kernel log 1998 /* NOTE: Use the debugless read so we don't flood kernel log
2025 * if IWL_DL_IO is set */ 1999 * if IWL_DL_IO is set */
2026 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 2000 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
2027 i + IWL39_RTC_INST_LOWER_BOUND); 2001 i + IWL39_RTC_INST_LOWER_BOUND);
2028 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 2002 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
2029 if (val != le32_to_cpu(*image)) { 2003 if (val != le32_to_cpu(*image)) {
2030#if 0 /* Enable this if you want to see details */ 2004#if 0 /* Enable this if you want to see details */
2031 IWL_ERR(priv, "uCode INST section is invalid at " 2005 IWL_ERR(priv, "uCode INST section is invalid at "
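
Editor's note: the two verification routines differ only in coverage. iwl3945_verify_inst_full() walks every instruction word through the auto-incrementing read port, while iwl3945_verify_inst_sparse() seeks to and spot-checks a subset. Below is a stand-alone model of the two strategies over a plain array; the sampling stride is an arbitrary stand-in, since the real selection interval is not visible in this hunk, and the model simply fails on the first mismatch.

#include <stdint.h>
#include <stdio.h>

static int verify_full(const uint32_t *live, const uint32_t *image, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (live[i] != image[i])
			return -1;	/* simplified: first mismatch fails */
	return 0;
}

static int verify_sparse(const uint32_t *live, const uint32_t *image,
			 size_t n, size_t stride)
{
	for (size_t i = 0; i < n; i += stride)
		if (live[i] != image[i])
			return -1;
	return 0;
}

int main(void)
{
	uint32_t img[8]  = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint32_t live[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	printf("full=%d sparse=%d\n",
	       verify_full(live, img, 8),
	       verify_sparse(live, img, 8, 4));
	return 0;
}
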
@@ -2101,7 +2075,7 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
2101#define IWL3945_UCODE_GET(item) \ 2075#define IWL3945_UCODE_GET(item) \
2102static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ 2076static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
2103{ \ 2077{ \
2104 return le32_to_cpu(ucode->u.v1.item); \ 2078 return le32_to_cpu(ucode->v1.item); \
2105} 2079}
2106 2080
2107static u32 iwl3945_ucode_get_header_size(u32 api_ver) 2081static u32 iwl3945_ucode_get_header_size(u32 api_ver)
@@ -2111,7 +2085,7 @@ static u32 iwl3945_ucode_get_header_size(u32 api_ver)
2111 2085
2112static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) 2086static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
2113{ 2087{
2114 return (u8 *) ucode->u.v1.data; 2088 return (u8 *) ucode->v1.data;
2115} 2089}
2116 2090
2117IWL3945_UCODE_GET(inst_size); 2091IWL3945_UCODE_GET(inst_size);
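
Editor's note: the accessor generator above now dereferences the v1 header directly instead of going through the old union member. A stand-alone sketch of how the macro expands, using a stripped-down stand-in for struct iwl_ucode_header (field set and le32_to_cpu() handling reduced for the illustration):

#include <stdint.h>
#include <stdio.h>

/* Stripped-down stand-in for the v1 uCode header; only the two fields
 * used below are modeled. */
struct iwl_ucode_header {
	struct {
		uint32_t inst_size;
		uint32_t data_size;
	} v1;
};

/* Same shape as IWL3945_UCODE_GET() after the rename: one trivial
 * accessor per header field. */
#define IWL3945_UCODE_GET(item) \
static uint32_t iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
{ \
	return ucode->v1.item; \
}

IWL3945_UCODE_GET(inst_size)
IWL3945_UCODE_GET(data_size)

int main(void)
{
	struct iwl_ucode_header hdr = {
		.v1 = { .inst_size = 4096, .data_size = 1024 },
	};

	printf("inst=%u data=%u\n",
	       iwl3945_ucode_get_inst_size(&hdr),
	       iwl3945_ucode_get_data_size(&hdr));
	return 0;
}
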
@@ -2286,13 +2260,13 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2286 * 1) unmodified from disk 2260 * 1) unmodified from disk
2287 * 2) backup cache for save/restore during power-downs */ 2261 * 2) backup cache for save/restore during power-downs */
2288 priv->ucode_code.len = inst_size; 2262 priv->ucode_code.len = inst_size;
2289 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); 2263 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2290 2264
2291 priv->ucode_data.len = data_size; 2265 priv->ucode_data.len = data_size;
2292 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); 2266 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2293 2267
2294 priv->ucode_data_backup.len = data_size; 2268 priv->ucode_data_backup.len = data_size;
2295 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 2269 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2296 2270
2297 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || 2271 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2298 !priv->ucode_data_backup.v_addr) 2272 !priv->ucode_data_backup.v_addr)
@@ -2301,10 +2275,10 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2301 /* Initialization instructions and data */ 2275 /* Initialization instructions and data */
2302 if (init_size && init_data_size) { 2276 if (init_size && init_data_size) {
2303 priv->ucode_init.len = init_size; 2277 priv->ucode_init.len = init_size;
2304 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); 2278 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2305 2279
2306 priv->ucode_init_data.len = init_data_size; 2280 priv->ucode_init_data.len = init_data_size;
2307 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); 2281 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2308 2282
2309 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) 2283 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2310 goto err_pci_alloc; 2284 goto err_pci_alloc;
@@ -2313,7 +2287,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2313 /* Bootstrap (instructions only, no data) */ 2287 /* Bootstrap (instructions only, no data) */
2314 if (boot_size) { 2288 if (boot_size) {
2315 priv->ucode_boot.len = boot_size; 2289 priv->ucode_boot.len = boot_size;
2316 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); 2290 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2317 2291
2318 if (!priv->ucode_boot.v_addr) 2292 if (!priv->ucode_boot.v_addr)
2319 goto err_pci_alloc; 2293 goto err_pci_alloc;
@@ -2400,14 +2374,14 @@ static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2400 pdata = priv->ucode_data_backup.p_addr; 2374 pdata = priv->ucode_data_backup.p_addr;
2401 2375
2402 /* Tell bootstrap uCode where to find image to load */ 2376 /* Tell bootstrap uCode where to find image to load */
2403 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 2377 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2404 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 2378 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2405 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 2379 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2406 priv->ucode_data.len); 2380 priv->ucode_data.len);
2407 2381
2408 /* Inst byte count must be last to set up, bit 31 signals uCode 2382 /* Inst byte count must be last to set up, bit 31 signals uCode
2409 * that all new ptr/size info is in place */ 2383 * that all new ptr/size info is in place */
2410 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 2384 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2411 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 2385 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2412 2386
2413 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); 2387 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
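
Editor's note: the write order in this hunk matters; the instruction byte count goes out last, with the load bit OR-ed in, so the bootstrap state machine only starts once every pointer is already programmed. A stand-alone model of that ordering follows; the exact load-bit value is an assumption taken from the "bit 31" comment above.

#include <stdint.h>
#include <stdio.h>

#define BSM_DRAM_INST_LOAD 0x80000000u	/* assumption: bit 31, per the comment above */

static void write_reg(const char *name, uint32_t val)
{
	printf("%-28s <- 0x%08x\n", name, val);	/* stand-in for iwl_legacy_write_prph() */
}

int main(void)
{
	uint32_t pinst = 0x00800000, pdata = 0x00900000;
	uint32_t inst_len = 0x3000, data_len = 0x2000;

	write_reg("BSM_DRAM_INST_PTR_REG", pinst);
	write_reg("BSM_DRAM_DATA_PTR_REG", pdata);
	write_reg("BSM_DRAM_DATA_BYTECOUNT_REG", data_len);
	/* must come last: signals uCode that the block above is complete */
	write_reg("BSM_DRAM_INST_BYTECOUNT_REG", inst_len | BSM_DRAM_INST_LOAD);
	return 0;
}
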
@@ -2488,7 +2462,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2488 goto restart; 2462 goto restart;
2489 } 2463 }
2490 2464
2491 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); 2465 rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
2492 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); 2466 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2493 2467
2494 if (rfkill & 0x1) { 2468 if (rfkill & 0x1) {
@@ -2510,18 +2484,18 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2510 set_bit(STATUS_ALIVE, &priv->status); 2484 set_bit(STATUS_ALIVE, &priv->status);
2511 2485
2512 /* Enable watchdog to monitor the driver tx queues */ 2486 /* Enable watchdog to monitor the driver tx queues */
2513 iwl_setup_watchdog(priv); 2487 iwl_legacy_setup_watchdog(priv);
2514 2488
2515 if (iwl_is_rfkill(priv)) 2489 if (iwl_legacy_is_rfkill(priv))
2516 return; 2490 return;
2517 2491
2518 ieee80211_wake_queues(priv->hw); 2492 ieee80211_wake_queues(priv->hw);
2519 2493
2520 priv->active_rate = IWL_RATES_MASK_3945; 2494 priv->active_rate = IWL_RATES_MASK_3945;
2521 2495
2522 iwl_power_update_mode(priv, true); 2496 iwl_legacy_power_update_mode(priv, true);
2523 2497
2524 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 2498 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2525 struct iwl3945_rxon_cmd *active_rxon = 2499 struct iwl3945_rxon_cmd *active_rxon =
2526 (struct iwl3945_rxon_cmd *)(&ctx->active); 2500 (struct iwl3945_rxon_cmd *)(&ctx->active);
2527 2501
@@ -2529,11 +2503,11 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2529 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2503 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2530 } else { 2504 } else {
2531 /* Initialize our rx_config data */ 2505 /* Initialize our rx_config data */
2532 iwl_connection_init_rx_config(priv, ctx); 2506 iwl_legacy_connection_init_rx_config(priv, ctx);
2533 } 2507 }
2534 2508
2535 /* Configure Bluetooth device coexistence support */ 2509 /* Configure Bluetooth device coexistence support */
2536 priv->cfg->ops->hcmd->send_bt_config(priv); 2510 iwl_legacy_send_bt_config(priv);
2537 2511
2538 set_bit(STATUS_READY, &priv->status); 2512 set_bit(STATUS_READY, &priv->status);
2539 2513
@@ -2560,7 +2534,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
2560 2534
2561 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2535 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2562 2536
2563 iwl_scan_cancel_timeout(priv, 200); 2537 iwl_legacy_scan_cancel_timeout(priv, 200);
2564 2538
2565 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); 2539 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2566 2540
@@ -2569,9 +2543,9 @@ static void __iwl3945_down(struct iwl_priv *priv)
2569 del_timer_sync(&priv->watchdog); 2543 del_timer_sync(&priv->watchdog);
2570 2544
2571 /* Station information will now be cleared in device */ 2545 /* Station information will now be cleared in device */
2572 iwl_clear_ucode_stations(priv, NULL); 2546 iwl_legacy_clear_ucode_stations(priv, NULL);
2573 iwl_dealloc_bcast_stations(priv); 2547 iwl_legacy_dealloc_bcast_stations(priv);
2574 iwl_clear_driver_stations(priv); 2548 iwl_legacy_clear_driver_stations(priv);
2575 2549
2576 /* Unblock any waiting calls */ 2550 /* Unblock any waiting calls */
2577 wake_up_interruptible_all(&priv->wait_command_queue); 2551 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2586,16 +2560,16 @@ static void __iwl3945_down(struct iwl_priv *priv)
2586 2560
2587 /* tell the device to stop sending interrupts */ 2561 /* tell the device to stop sending interrupts */
2588 spin_lock_irqsave(&priv->lock, flags); 2562 spin_lock_irqsave(&priv->lock, flags);
2589 iwl_disable_interrupts(priv); 2563 iwl_legacy_disable_interrupts(priv);
2590 spin_unlock_irqrestore(&priv->lock, flags); 2564 spin_unlock_irqrestore(&priv->lock, flags);
2591 iwl_synchronize_irq(priv); 2565 iwl3945_synchronize_irq(priv);
2592 2566
2593 if (priv->mac80211_registered) 2567 if (priv->mac80211_registered)
2594 ieee80211_stop_queues(priv->hw); 2568 ieee80211_stop_queues(priv->hw);
2595 2569
2596 /* If we have not previously called iwl3945_init() then 2570 /* If we have not previously called iwl3945_init() then
2597 * clear all bits but the RF Kill bits and return */ 2571 * clear all bits but the RF Kill bits and return */
2598 if (!iwl_is_init(priv)) { 2572 if (!iwl_legacy_is_init(priv)) {
2599 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << 2573 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2600 STATUS_RF_KILL_HW | 2574 STATUS_RF_KILL_HW |
2601 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 2575 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
@@ -2620,11 +2594,11 @@ static void __iwl3945_down(struct iwl_priv *priv)
2620 iwl3945_hw_rxq_stop(priv); 2594 iwl3945_hw_rxq_stop(priv);
2621 2595
2622 /* Power-down device's busmaster DMA clocks */ 2596 /* Power-down device's busmaster DMA clocks */
2623 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); 2597 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2624 udelay(5); 2598 udelay(5);
2625 2599
2626 /* Stop the device, and put it in low power state */ 2600 /* Stop the device, and put it in low power state */
2627 iwl_apm_stop(priv); 2601 iwl_legacy_apm_stop(priv);
2628 2602
2629 exit: 2603 exit:
2630 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2604 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2655,7 +2629,8 @@ static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2655 u8 sta_id; 2629 u8 sta_id;
2656 2630
2657 spin_lock_irqsave(&priv->sta_lock, flags); 2631 spin_lock_irqsave(&priv->sta_lock, flags);
2658 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL); 2632 sta_id = iwl_legacy_prep_station(priv, ctx,
2633 iwl_bcast_addr, false, NULL);
2659 if (sta_id == IWL_INVALID_STATION) { 2634 if (sta_id == IWL_INVALID_STATION) {
2660 IWL_ERR(priv, "Unable to prepare broadcast station\n"); 2635 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2661 spin_unlock_irqrestore(&priv->sta_lock, flags); 2636 spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -2713,7 +2688,7 @@ static int __iwl3945_up(struct iwl_priv *priv)
2713 2688
2714 /* clear (again), then enable host interrupts */ 2689 /* clear (again), then enable host interrupts */
2715 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2690 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2716 iwl_enable_interrupts(priv); 2691 iwl_legacy_enable_interrupts(priv);
2717 2692
2718 /* really make sure rfkill handshake bits are cleared */ 2693 /* really make sure rfkill handshake bits are cleared */
2719 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2694 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
@@ -2855,7 +2830,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2855 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 2830 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
2856 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 2831 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
2857 2832
2858 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { 2833 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2859 u16 interval = 0; 2834 u16 interval = 0;
2860 u32 extra; 2835 u32 extra;
2861 u32 suspend_time = 100; 2836 u32 suspend_time = 100;
@@ -2943,7 +2918,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2943 2918
2944 if (!priv->is_internal_short_scan) { 2919 if (!priv->is_internal_short_scan) {
2945 scan->tx_cmd.len = cpu_to_le16( 2920 scan->tx_cmd.len = cpu_to_le16(
2946 iwl_fill_probe_req(priv, 2921 iwl_legacy_fill_probe_req(priv,
2947 (struct ieee80211_mgmt *)scan->data, 2922 (struct ieee80211_mgmt *)scan->data,
2948 vif->addr, 2923 vif->addr,
2949 priv->scan_request->ie, 2924 priv->scan_request->ie,
@@ -2952,7 +2927,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2952 } else { 2927 } else {
2953 /* use bcast addr, will not be transmitted but must be valid */ 2928 /* use bcast addr, will not be transmitted but must be valid */
2954 scan->tx_cmd.len = cpu_to_le16( 2929 scan->tx_cmd.len = cpu_to_le16(
2955 iwl_fill_probe_req(priv, 2930 iwl_legacy_fill_probe_req(priv,
2956 (struct ieee80211_mgmt *)scan->data, 2931 (struct ieee80211_mgmt *)scan->data,
2957 iwl_bcast_addr, NULL, 0, 2932 iwl_bcast_addr, NULL, 0,
2958 IWL_MAX_SCAN_SIZE - sizeof(*scan))); 2933 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
@@ -2982,7 +2957,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2982 scan->len = cpu_to_le16(cmd.len); 2957 scan->len = cpu_to_le16(cmd.len);
2983 2958
2984 set_bit(STATUS_SCAN_HW, &priv->status); 2959 set_bit(STATUS_SCAN_HW, &priv->status);
2985 ret = iwl_send_cmd_sync(priv, &cmd); 2960 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
2986 if (ret) 2961 if (ret)
2987 clear_bit(STATUS_SCAN_HW, &priv->status); 2962 clear_bit(STATUS_SCAN_HW, &priv->status);
2988 return ret; 2963 return ret;
@@ -3050,25 +3025,20 @@ void iwl3945_post_associate(struct iwl_priv *priv)
3050 if (!ctx->vif || !priv->is_open) 3025 if (!ctx->vif || !priv->is_open)
3051 return; 3026 return;
3052 3027
3053 if (ctx->vif->type == NL80211_IFTYPE_AP) {
3054 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3055 return;
3056 }
3057
3058 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3028 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3059 ctx->vif->bss_conf.aid, ctx->active.bssid_addr); 3029 ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
3060 3030
3061 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3031 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3062 return; 3032 return;
3063 3033
3064 iwl_scan_cancel_timeout(priv, 200); 3034 iwl_legacy_scan_cancel_timeout(priv, 200);
3065 3035
3066 conf = ieee80211_get_hw_conf(priv->hw); 3036 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
3067 3037
3068 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3038 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3069 iwl3945_commit_rxon(priv, ctx); 3039 iwl3945_commit_rxon(priv, ctx);
3070 3040
3071 rc = iwl_send_rxon_timing(priv, ctx); 3041 rc = iwl_legacy_send_rxon_timing(priv, ctx);
3072 if (rc) 3042 if (rc)
3073 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3043 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3074 "Attempting to continue.\n"); 3044 "Attempting to continue.\n");
@@ -3226,14 +3196,14 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3226 return; 3196 return;
3227 3197
3228 /* The following should be done only at AP bring up */ 3198 /* The following should be done only at AP bring up */
3229 if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) { 3199 if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
3230 3200
3231 /* RXON - unassoc (to set timing command) */ 3201 /* RXON - unassoc (to set timing command) */
3232 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3202 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3233 iwl3945_commit_rxon(priv, ctx); 3203 iwl3945_commit_rxon(priv, ctx);
3234 3204
3235 /* RXON Timing */ 3205 /* RXON Timing */
3236 rc = iwl_send_rxon_timing(priv, ctx); 3206 rc = iwl_legacy_send_rxon_timing(priv, ctx);
3237 if (rc) 3207 if (rc)
3238 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3208 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3239 "Attempting to continue.\n"); 3209 "Attempting to continue.\n");
@@ -3260,10 +3230,6 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3260 iwl3945_commit_rxon(priv, ctx); 3230 iwl3945_commit_rxon(priv, ctx);
3261 } 3231 }
3262 iwl3945_send_beacon_cmd(priv); 3232 iwl3945_send_beacon_cmd(priv);
3263
3264 /* FIXME - we need to add code here to detect a totally new
3265 * configuration, reset the AP, unassoc, rxon timing, assoc,
3266 * clear sta table, add BCAST sta... */
3267} 3233}
3268 3234
3269static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3235static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3291,17 +3257,17 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3291 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 3257 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
3292 return -EOPNOTSUPP; 3258 return -EOPNOTSUPP;
3293 3259
3294 static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS); 3260 static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
3295 3261
3296 if (!static_key) { 3262 if (!static_key) {
3297 sta_id = iwl_sta_id_or_broadcast( 3263 sta_id = iwl_legacy_sta_id_or_broadcast(
3298 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta); 3264 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
3299 if (sta_id == IWL_INVALID_STATION) 3265 if (sta_id == IWL_INVALID_STATION)
3300 return -EINVAL; 3266 return -EINVAL;
3301 } 3267 }
3302 3268
3303 mutex_lock(&priv->mutex); 3269 mutex_lock(&priv->mutex);
3304 iwl_scan_cancel_timeout(priv, 100); 3270 iwl_legacy_scan_cancel_timeout(priv, 100);
3305 3271
3306 switch (cmd) { 3272 switch (cmd) {
3307 case SET_KEY: 3273 case SET_KEY:
@@ -3346,7 +3312,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3346 sta_priv->common.sta_id = IWL_INVALID_STATION; 3312 sta_priv->common.sta_id = IWL_INVALID_STATION;
3347 3313
3348 3314
3349 ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS], 3315 ret = iwl_legacy_add_station_common(priv,
3316 &priv->contexts[IWL_RXON_CTX_BSS],
3350 sta->addr, is_ap, sta, &sta_id); 3317 sta->addr, is_ap, sta, &sta_id);
3351 if (ret) { 3318 if (ret) {
3352 IWL_ERR(priv, "Unable to add station %pM (%d)\n", 3319 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
@@ -3407,7 +3374,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3407 3374
3408 /* 3375 /*
3409 * Receiving all multicast frames is always enabled by the 3376 * Receiving all multicast frames is always enabled by the
3410 * default flags setup in iwl_connection_init_rx_config() 3377 * default flags setup in iwl_legacy_connection_init_rx_config()
3411 * since we currently do not support programming multicast 3378 * since we currently do not support programming multicast
3412 * filters into the device. 3379 * filters into the device.
3413 */ 3380 */
@@ -3422,7 +3389,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3422 * 3389 *
3423 *****************************************************************************/ 3390 *****************************************************************************/
3424 3391
3425#ifdef CONFIG_IWLWIFI_DEBUG 3392#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3426 3393
3427/* 3394/*
3428 * The following adds a new attribute to the sysfs representation 3395 * The following adds a new attribute to the sysfs representation
@@ -3435,13 +3402,13 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3435 * level that is used instead of the global debug level if it (the per 3402 * level that is used instead of the global debug level if it (the per
3436 * device debug level) is set. 3403 * device debug level) is set.
3437 */ 3404 */
3438static ssize_t show_debug_level(struct device *d, 3405static ssize_t iwl3945_show_debug_level(struct device *d,
3439 struct device_attribute *attr, char *buf) 3406 struct device_attribute *attr, char *buf)
3440{ 3407{
3441 struct iwl_priv *priv = dev_get_drvdata(d); 3408 struct iwl_priv *priv = dev_get_drvdata(d);
3442 return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); 3409 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
3443} 3410}
3444static ssize_t store_debug_level(struct device *d, 3411static ssize_t iwl3945_store_debug_level(struct device *d,
3445 struct device_attribute *attr, 3412 struct device_attribute *attr,
3446 const char *buf, size_t count) 3413 const char *buf, size_t count)
3447{ 3414{
@@ -3454,7 +3421,7 @@ static ssize_t store_debug_level(struct device *d,
3454 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); 3421 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
3455 else { 3422 else {
3456 priv->debug_level = val; 3423 priv->debug_level = val;
3457 if (iwl_alloc_traffic_mem(priv)) 3424 if (iwl_legacy_alloc_traffic_mem(priv))
3458 IWL_ERR(priv, 3425 IWL_ERR(priv,
3459 "Not enough memory to generate traffic log\n"); 3426 "Not enough memory to generate traffic log\n");
3460 } 3427 }
@@ -3462,31 +3429,31 @@ static ssize_t store_debug_level(struct device *d,
3462} 3429}
3463 3430
3464static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, 3431static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
3465 show_debug_level, store_debug_level); 3432 iwl3945_show_debug_level, iwl3945_store_debug_level);
3466 3433
3467#endif /* CONFIG_IWLWIFI_DEBUG */ 3434#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
3468 3435
3469static ssize_t show_temperature(struct device *d, 3436static ssize_t iwl3945_show_temperature(struct device *d,
3470 struct device_attribute *attr, char *buf) 3437 struct device_attribute *attr, char *buf)
3471{ 3438{
3472 struct iwl_priv *priv = dev_get_drvdata(d); 3439 struct iwl_priv *priv = dev_get_drvdata(d);
3473 3440
3474 if (!iwl_is_alive(priv)) 3441 if (!iwl_legacy_is_alive(priv))
3475 return -EAGAIN; 3442 return -EAGAIN;
3476 3443
3477 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); 3444 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3478} 3445}
3479 3446
3480static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); 3447static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
3481 3448
3482static ssize_t show_tx_power(struct device *d, 3449static ssize_t iwl3945_show_tx_power(struct device *d,
3483 struct device_attribute *attr, char *buf) 3450 struct device_attribute *attr, char *buf)
3484{ 3451{
3485 struct iwl_priv *priv = dev_get_drvdata(d); 3452 struct iwl_priv *priv = dev_get_drvdata(d);
3486 return sprintf(buf, "%d\n", priv->tx_power_user_lmt); 3453 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3487} 3454}
3488 3455
3489static ssize_t store_tx_power(struct device *d, 3456static ssize_t iwl3945_store_tx_power(struct device *d,
3490 struct device_attribute *attr, 3457 struct device_attribute *attr,
3491 const char *buf, size_t count) 3458 const char *buf, size_t count)
3492{ 3459{
@@ -3503,9 +3470,9 @@ static ssize_t store_tx_power(struct device *d,
3503 return count; 3470 return count;
3504} 3471}
3505 3472
3506static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); 3473static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
3507 3474
3508static ssize_t show_flags(struct device *d, 3475static ssize_t iwl3945_show_flags(struct device *d,
3509 struct device_attribute *attr, char *buf) 3476 struct device_attribute *attr, char *buf)
3510{ 3477{
3511 struct iwl_priv *priv = dev_get_drvdata(d); 3478 struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3514,7 +3481,7 @@ static ssize_t show_flags(struct device *d,
3514 return sprintf(buf, "0x%04X\n", ctx->active.flags); 3481 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3515} 3482}
3516 3483
3517static ssize_t store_flags(struct device *d, 3484static ssize_t iwl3945_store_flags(struct device *d,
3518 struct device_attribute *attr, 3485 struct device_attribute *attr,
3519 const char *buf, size_t count) 3486 const char *buf, size_t count)
3520{ 3487{
@@ -3525,7 +3492,7 @@ static ssize_t store_flags(struct device *d,
3525 mutex_lock(&priv->mutex); 3492 mutex_lock(&priv->mutex);
3526 if (le32_to_cpu(ctx->staging.flags) != flags) { 3493 if (le32_to_cpu(ctx->staging.flags) != flags) {
3527 /* Cancel any currently running scans... */ 3494 /* Cancel any currently running scans... */
3528 if (iwl_scan_cancel_timeout(priv, 100)) 3495 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3529 IWL_WARN(priv, "Could not cancel scan.\n"); 3496 IWL_WARN(priv, "Could not cancel scan.\n");
3530 else { 3497 else {
3531 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", 3498 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
@@ -3539,9 +3506,9 @@ static ssize_t store_flags(struct device *d,
3539 return count; 3506 return count;
3540} 3507}
3541 3508
3542static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); 3509static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
3543 3510
3544static ssize_t show_filter_flags(struct device *d, 3511static ssize_t iwl3945_show_filter_flags(struct device *d,
3545 struct device_attribute *attr, char *buf) 3512 struct device_attribute *attr, char *buf)
3546{ 3513{
3547 struct iwl_priv *priv = dev_get_drvdata(d); 3514 struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3551,7 +3518,7 @@ static ssize_t show_filter_flags(struct device *d,
3551 le32_to_cpu(ctx->active.filter_flags)); 3518 le32_to_cpu(ctx->active.filter_flags));
3552} 3519}
3553 3520
3554static ssize_t store_filter_flags(struct device *d, 3521static ssize_t iwl3945_store_filter_flags(struct device *d,
3555 struct device_attribute *attr, 3522 struct device_attribute *attr,
3556 const char *buf, size_t count) 3523 const char *buf, size_t count)
3557{ 3524{
@@ -3562,7 +3529,7 @@ static ssize_t store_filter_flags(struct device *d,
3562 mutex_lock(&priv->mutex); 3529 mutex_lock(&priv->mutex);
3563 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { 3530 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3564 /* Cancel any currently running scans... */ 3531 /* Cancel any currently running scans... */
3565 if (iwl_scan_cancel_timeout(priv, 100)) 3532 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3566 IWL_WARN(priv, "Could not cancel scan.\n"); 3533 IWL_WARN(priv, "Could not cancel scan.\n");
3567 else { 3534 else {
3568 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " 3535 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
@@ -3577,10 +3544,10 @@ static ssize_t store_filter_flags(struct device *d,
3577 return count; 3544 return count;
3578} 3545}
3579 3546
3580static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3547static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
3581 store_filter_flags); 3548 iwl3945_store_filter_flags);
3582 3549
3583static ssize_t show_measurement(struct device *d, 3550static ssize_t iwl3945_show_measurement(struct device *d,
3584 struct device_attribute *attr, char *buf) 3551 struct device_attribute *attr, char *buf)
3585{ 3552{
3586 struct iwl_priv *priv = dev_get_drvdata(d); 3553 struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3612,7 +3579,7 @@ static ssize_t show_measurement(struct device *d,
3612 return len; 3579 return len;
3613} 3580}
3614 3581
3615static ssize_t store_measurement(struct device *d, 3582static ssize_t iwl3945_store_measurement(struct device *d,
3616 struct device_attribute *attr, 3583 struct device_attribute *attr,
3617 const char *buf, size_t count) 3584 const char *buf, size_t count)
3618{ 3585{
@@ -3649,9 +3616,9 @@ static ssize_t store_measurement(struct device *d,
3649} 3616}
3650 3617
3651static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, 3618static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3652 show_measurement, store_measurement); 3619 iwl3945_show_measurement, iwl3945_store_measurement);
3653 3620
3654static ssize_t store_retry_rate(struct device *d, 3621static ssize_t iwl3945_store_retry_rate(struct device *d,
3655 struct device_attribute *attr, 3622 struct device_attribute *attr,
3656 const char *buf, size_t count) 3623 const char *buf, size_t count)
3657{ 3624{
@@ -3664,38 +3631,38 @@ static ssize_t store_retry_rate(struct device *d,
3664 return count; 3631 return count;
3665} 3632}
3666 3633
3667static ssize_t show_retry_rate(struct device *d, 3634static ssize_t iwl3945_show_retry_rate(struct device *d,
3668 struct device_attribute *attr, char *buf) 3635 struct device_attribute *attr, char *buf)
3669{ 3636{
3670 struct iwl_priv *priv = dev_get_drvdata(d); 3637 struct iwl_priv *priv = dev_get_drvdata(d);
3671 return sprintf(buf, "%d", priv->retry_rate); 3638 return sprintf(buf, "%d", priv->retry_rate);
3672} 3639}
3673 3640
3674static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, 3641static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
3675 store_retry_rate); 3642 iwl3945_store_retry_rate);
3676 3643
3677 3644
3678static ssize_t show_channels(struct device *d, 3645static ssize_t iwl3945_show_channels(struct device *d,
3679 struct device_attribute *attr, char *buf) 3646 struct device_attribute *attr, char *buf)
3680{ 3647{
3681 /* all this shit doesn't belong into sysfs anyway */ 3648 /* all this shit doesn't belong into sysfs anyway */
3682 return 0; 3649 return 0;
3683} 3650}
3684 3651
3685static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 3652static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
3686 3653
3687static ssize_t show_antenna(struct device *d, 3654static ssize_t iwl3945_show_antenna(struct device *d,
3688 struct device_attribute *attr, char *buf) 3655 struct device_attribute *attr, char *buf)
3689{ 3656{
3690 struct iwl_priv *priv = dev_get_drvdata(d); 3657 struct iwl_priv *priv = dev_get_drvdata(d);
3691 3658
3692 if (!iwl_is_alive(priv)) 3659 if (!iwl_legacy_is_alive(priv))
3693 return -EAGAIN; 3660 return -EAGAIN;
3694 3661
3695 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); 3662 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3696} 3663}
3697 3664
3698static ssize_t store_antenna(struct device *d, 3665static ssize_t iwl3945_store_antenna(struct device *d,
3699 struct device_attribute *attr, 3666 struct device_attribute *attr,
3700 const char *buf, size_t count) 3667 const char *buf, size_t count)
3701{ 3668{
@@ -3720,20 +3687,20 @@ static ssize_t store_antenna(struct device *d,
3720 return count; 3687 return count;
3721} 3688}
3722 3689
3723static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); 3690static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
3724 3691
3725static ssize_t show_status(struct device *d, 3692static ssize_t iwl3945_show_status(struct device *d,
3726 struct device_attribute *attr, char *buf) 3693 struct device_attribute *attr, char *buf)
3727{ 3694{
3728 struct iwl_priv *priv = dev_get_drvdata(d); 3695 struct iwl_priv *priv = dev_get_drvdata(d);
3729 if (!iwl_is_alive(priv)) 3696 if (!iwl_legacy_is_alive(priv))
3730 return -EAGAIN; 3697 return -EAGAIN;
3731 return sprintf(buf, "0x%08x\n", (int)priv->status); 3698 return sprintf(buf, "0x%08x\n", (int)priv->status);
3732} 3699}
3733 3700
3734static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 3701static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
3735 3702
3736static ssize_t dump_error_log(struct device *d, 3703static ssize_t iwl3945_dump_error_log(struct device *d,
3737 struct device_attribute *attr, 3704 struct device_attribute *attr,
3738 const char *buf, size_t count) 3705 const char *buf, size_t count)
3739{ 3706{
@@ -3746,7 +3713,7 @@ static ssize_t dump_error_log(struct device *d,
3746 return strnlen(buf, count); 3713 return strnlen(buf, count);
3747} 3714}
3748 3715
3749static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); 3716static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
3750 3717
3751/***************************************************************************** 3718/*****************************************************************************
3752 * 3719 *
@@ -3762,18 +3729,17 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3762 3729
3763 INIT_WORK(&priv->restart, iwl3945_bg_restart); 3730 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3764 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); 3731 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3765 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
3766 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 3732 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3767 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3733 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3768 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); 3734 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3769 3735
3770 iwl_setup_scan_deferred_work(priv); 3736 iwl_legacy_setup_scan_deferred_work(priv);
3771 3737
3772 iwl3945_hw_setup_deferred_work(priv); 3738 iwl3945_hw_setup_deferred_work(priv);
3773 3739
3774 init_timer(&priv->watchdog); 3740 init_timer(&priv->watchdog);
3775 priv->watchdog.data = (unsigned long)priv; 3741 priv->watchdog.data = (unsigned long)priv;
3776 priv->watchdog.function = iwl_bg_watchdog; 3742 priv->watchdog.function = iwl_legacy_bg_watchdog;
3777 3743
3778 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3744 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3779 iwl3945_irq_tasklet, (unsigned long)priv); 3745 iwl3945_irq_tasklet, (unsigned long)priv);
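
Editor's note: the watchdog setup uses the timer API of that era, a data word plus a function pointer, with the callback casting the data back to the private structure; the tasklet registration above follows the same opaque-argument pattern. A stand-alone model of that callback-plus-context idiom (all names here are made up for the illustration):

#include <stdio.h>

struct fake_priv { const char *name; };

struct fake_timer {
	unsigned long data;			/* opaque context, like priv->watchdog.data */
	void (*function)(unsigned long data);	/* like priv->watchdog.function */
};

static void watchdog_cb(unsigned long data)
{
	struct fake_priv *priv = (struct fake_priv *)data;

	printf("watchdog fired for %s\n", priv->name);
}

int main(void)
{
	struct fake_priv priv = { .name = "iwl3945" };
	struct fake_timer t = {
		.data     = (unsigned long)&priv,
		.function = watchdog_cb,
	};

	t.function(t.data);	/* the timer core would do this on expiry */
	return 0;
}
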
@@ -3785,9 +3751,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3785 3751
3786 cancel_delayed_work_sync(&priv->init_alive_start); 3752 cancel_delayed_work_sync(&priv->init_alive_start);
3787 cancel_delayed_work(&priv->alive_start); 3753 cancel_delayed_work(&priv->alive_start);
3788 cancel_work_sync(&priv->beacon_update);
3789 3754
3790 iwl_cancel_scan_deferred_work(priv); 3755 iwl_legacy_cancel_scan_deferred_work(priv);
3791} 3756}
3792 3757
3793static struct attribute *iwl3945_sysfs_entries[] = { 3758static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3801,7 +3766,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
3801 &dev_attr_status.attr, 3766 &dev_attr_status.attr,
3802 &dev_attr_temperature.attr, 3767 &dev_attr_temperature.attr,
3803 &dev_attr_tx_power.attr, 3768 &dev_attr_tx_power.attr,
3804#ifdef CONFIG_IWLWIFI_DEBUG 3769#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3805 &dev_attr_debug_level.attr, 3770 &dev_attr_debug_level.attr,
3806#endif 3771#endif
3807 NULL 3772 NULL
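
Editor's note: the sysfs entry table is a NULL-terminated pointer array, which is why the debug_level entry can simply drop out when CONFIG_IWLWIFI_LEGACY_DEBUG is not set. A stand-alone model of walking such a table; the entry list is trimmed to names visible in this diff and the preprocessor symbol is a stand-in for the Kconfig option.

#include <stdio.h>

int main(void)
{
	const char *entries[] = {
		"status", "temperature", "tx_power",
#ifdef MODEL_LEGACY_DEBUG	/* stand-in for CONFIG_IWLWIFI_LEGACY_DEBUG */
		"debug_level",
#endif
		NULL
	};

	/* walk until the terminating NULL, as the attribute-group user would */
	for (const char **e = entries; *e; e++)
		printf("%s\n", *e);
	return 0;
}
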
@@ -3816,19 +3781,19 @@ struct ieee80211_ops iwl3945_hw_ops = {
3816 .tx = iwl3945_mac_tx, 3781 .tx = iwl3945_mac_tx,
3817 .start = iwl3945_mac_start, 3782 .start = iwl3945_mac_start,
3818 .stop = iwl3945_mac_stop, 3783 .stop = iwl3945_mac_stop,
3819 .add_interface = iwl_mac_add_interface, 3784 .add_interface = iwl_legacy_mac_add_interface,
3820 .remove_interface = iwl_mac_remove_interface, 3785 .remove_interface = iwl_legacy_mac_remove_interface,
3821 .change_interface = iwl_mac_change_interface, 3786 .change_interface = iwl_legacy_mac_change_interface,
3822 .config = iwl_legacy_mac_config, 3787 .config = iwl_legacy_mac_config,
3823 .configure_filter = iwl3945_configure_filter, 3788 .configure_filter = iwl3945_configure_filter,
3824 .set_key = iwl3945_mac_set_key, 3789 .set_key = iwl3945_mac_set_key,
3825 .conf_tx = iwl_mac_conf_tx, 3790 .conf_tx = iwl_legacy_mac_conf_tx,
3826 .reset_tsf = iwl_legacy_mac_reset_tsf, 3791 .reset_tsf = iwl_legacy_mac_reset_tsf,
3827 .bss_info_changed = iwl_legacy_mac_bss_info_changed, 3792 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
3828 .hw_scan = iwl_mac_hw_scan, 3793 .hw_scan = iwl_legacy_mac_hw_scan,
3829 .sta_add = iwl3945_mac_sta_add, 3794 .sta_add = iwl3945_mac_sta_add,
3830 .sta_remove = iwl_mac_sta_remove, 3795 .sta_remove = iwl_legacy_mac_sta_remove,
3831 .tx_last_beacon = iwl_mac_tx_last_beacon, 3796 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
3832}; 3797};
3833 3798
3834static int iwl3945_init_drv(struct iwl_priv *priv) 3799static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3870,7 +3835,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3870 ret = -EINVAL; 3835 ret = -EINVAL;
3871 goto err; 3836 goto err;
3872 } 3837 }
3873 ret = iwl_init_channel_map(priv); 3838 ret = iwl_legacy_init_channel_map(priv);
3874 if (ret) { 3839 if (ret) {
3875 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); 3840 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3876 goto err; 3841 goto err;
@@ -3882,7 +3847,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3882 goto err_free_channel_map; 3847 goto err_free_channel_map;
3883 } 3848 }
3884 3849
3885 ret = iwlcore_init_geos(priv); 3850 ret = iwl_legacy_init_geos(priv);
3886 if (ret) { 3851 if (ret) {
3887 IWL_ERR(priv, "initializing geos failed: %d\n", ret); 3852 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3888 goto err_free_channel_map; 3853 goto err_free_channel_map;
@@ -3892,7 +3857,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3892 return 0; 3857 return 0;
3893 3858
3894err_free_channel_map: 3859err_free_channel_map:
3895 iwl_free_channel_map(priv); 3860 iwl_legacy_free_channel_map(priv);
3896err: 3861err:
3897 return ret; 3862 return ret;
3898} 3863}
@@ -3912,10 +3877,6 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3912 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3877 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3913 IEEE80211_HW_SPECTRUM_MGMT; 3878 IEEE80211_HW_SPECTRUM_MGMT;
3914 3879
3915 if (!priv->cfg->base_params->broken_powersave)
3916 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3917 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3918
3919 hw->wiphy->interface_modes = 3880 hw->wiphy->interface_modes =
3920 priv->contexts[IWL_RXON_CTX_BSS].interface_modes; 3881 priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
3921 3882
@@ -3938,7 +3899,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3938 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 3899 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3939 &priv->bands[IEEE80211_BAND_5GHZ]; 3900 &priv->bands[IEEE80211_BAND_5GHZ];
3940 3901
3941 iwl_leds_init(priv); 3902 iwl_legacy_leds_init(priv);
3942 3903
3943 ret = ieee80211_register_hw(priv->hw); 3904 ret = ieee80211_register_hw(priv->hw);
3944 if (ret) { 3905 if (ret) {
@@ -3965,7 +3926,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3965 3926
3966 /* mac80211 allocates memory for this device instance, including 3927 /* mac80211 allocates memory for this device instance, including
3967 * space for this driver's private structure */ 3928 * space for this driver's private structure */
3968 hw = iwl_alloc_all(cfg); 3929 hw = iwl_legacy_alloc_all(cfg);
3969 if (hw == NULL) { 3930 if (hw == NULL) {
3970 pr_err("Can not allocate network device\n"); 3931 pr_err("Can not allocate network device\n");
3971 err = -ENOMEM; 3932 err = -ENOMEM;
@@ -4005,13 +3966,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4005 iwl3945_hw_ops.hw_scan = NULL; 3966 iwl3945_hw_ops.hw_scan = NULL;
4006 } 3967 }
4007 3968
4008
4009 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); 3969 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
4010 priv->cfg = cfg; 3970 priv->cfg = cfg;
4011 priv->pci_dev = pdev; 3971 priv->pci_dev = pdev;
4012 priv->inta_mask = CSR_INI_SET_MASK; 3972 priv->inta_mask = CSR_INI_SET_MASK;
4013 3973
4014 if (iwl_alloc_traffic_mem(priv)) 3974 if (iwl_legacy_alloc_traffic_mem(priv))
4015 IWL_ERR(priv, "Not enough memory to generate traffic log\n"); 3975 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
4016 3976
4017 /*************************** 3977 /***************************
@@ -4075,7 +4035,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4075 * ********************/ 4035 * ********************/
4076 4036
4077 /* Read the EEPROM */ 4037 /* Read the EEPROM */
4078 err = iwl_eeprom_init(priv); 4038 err = iwl_legacy_eeprom_init(priv);
4079 if (err) { 4039 if (err) {
4080 IWL_ERR(priv, "Unable to init EEPROM\n"); 4040 IWL_ERR(priv, "Unable to init EEPROM\n");
4081 goto out_iounmap; 4041 goto out_iounmap;
@@ -4112,12 +4072,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4112 * ********************/ 4072 * ********************/
4113 4073
4114 spin_lock_irqsave(&priv->lock, flags); 4074 spin_lock_irqsave(&priv->lock, flags);
4115 iwl_disable_interrupts(priv); 4075 iwl_legacy_disable_interrupts(priv);
4116 spin_unlock_irqrestore(&priv->lock, flags); 4076 spin_unlock_irqrestore(&priv->lock, flags);
4117 4077
4118 pci_enable_msi(priv->pci_dev); 4078 pci_enable_msi(priv->pci_dev);
4119 4079
4120 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr, 4080 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
4121 IRQF_SHARED, DRV_NAME, priv); 4081 IRQF_SHARED, DRV_NAME, priv);
4122 if (err) { 4082 if (err) {
4123 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); 4083 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4130,24 +4090,24 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4130 goto out_release_irq; 4090 goto out_release_irq;
4131 } 4091 }
4132 4092
4133 iwl_set_rxon_channel(priv, 4093 iwl_legacy_set_rxon_channel(priv,
4134 &priv->bands[IEEE80211_BAND_2GHZ].channels[5], 4094 &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
4135 &priv->contexts[IWL_RXON_CTX_BSS]); 4095 &priv->contexts[IWL_RXON_CTX_BSS]);
4136 iwl3945_setup_deferred_work(priv); 4096 iwl3945_setup_deferred_work(priv);
4137 iwl3945_setup_rx_handlers(priv); 4097 iwl3945_setup_rx_handlers(priv);
4138 iwl_power_initialize(priv); 4098 iwl_legacy_power_initialize(priv);
4139 4099
4140 /********************************* 4100 /*********************************
4141 * 8. Setup and Register mac80211 4101 * 8. Setup and Register mac80211
4142 * *******************************/ 4102 * *******************************/
4143 4103
4144 iwl_enable_interrupts(priv); 4104 iwl_legacy_enable_interrupts(priv);
4145 4105
4146 err = iwl3945_setup_mac(priv); 4106 err = iwl3945_setup_mac(priv);
4147 if (err) 4107 if (err)
4148 goto out_remove_sysfs; 4108 goto out_remove_sysfs;
4149 4109
4150 err = iwl_dbgfs_register(priv, DRV_NAME); 4110 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
4151 if (err) 4111 if (err)
4152 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); 4112 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
4153 4113
@@ -4165,12 +4125,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4165 free_irq(priv->pci_dev->irq, priv); 4125 free_irq(priv->pci_dev->irq, priv);
4166 out_disable_msi: 4126 out_disable_msi:
4167 pci_disable_msi(priv->pci_dev); 4127 pci_disable_msi(priv->pci_dev);
4168 iwlcore_free_geos(priv); 4128 iwl_legacy_free_geos(priv);
4169 iwl_free_channel_map(priv); 4129 iwl_legacy_free_channel_map(priv);
4170 out_unset_hw_params: 4130 out_unset_hw_params:
4171 iwl3945_unset_hw_params(priv); 4131 iwl3945_unset_hw_params(priv);
4172 out_eeprom_free: 4132 out_eeprom_free:
4173 iwl_eeprom_free(priv); 4133 iwl_legacy_eeprom_free(priv);
4174 out_iounmap: 4134 out_iounmap:
4175 pci_iounmap(pdev, priv->hw_base); 4135 pci_iounmap(pdev, priv->hw_base);
4176 out_pci_release_regions: 4136 out_pci_release_regions:
@@ -4179,7 +4139,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4179 pci_set_drvdata(pdev, NULL); 4139 pci_set_drvdata(pdev, NULL);
4180 pci_disable_device(pdev); 4140 pci_disable_device(pdev);
4181 out_ieee80211_free_hw: 4141 out_ieee80211_free_hw:
4182 iwl_free_traffic_mem(priv); 4142 iwl_legacy_free_traffic_mem(priv);
4183 ieee80211_free_hw(priv->hw); 4143 ieee80211_free_hw(priv->hw);
4184 out: 4144 out:
4185 return err; 4145 return err;
@@ -4195,11 +4155,11 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4195 4155
4196 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); 4156 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
4197 4157
4198 iwl_dbgfs_unregister(priv); 4158 iwl_legacy_dbgfs_unregister(priv);
4199 4159
4200 set_bit(STATUS_EXIT_PENDING, &priv->status); 4160 set_bit(STATUS_EXIT_PENDING, &priv->status);
4201 4161
4202 iwl_leds_exit(priv); 4162 iwl_legacy_leds_exit(priv);
4203 4163
4204 if (priv->mac80211_registered) { 4164 if (priv->mac80211_registered) {
4205 ieee80211_unregister_hw(priv->hw); 4165 ieee80211_unregister_hw(priv->hw);
@@ -4215,16 +4175,16 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4215 * paths to avoid running iwl_down() at all before leaving driver. 4175 * paths to avoid running iwl_down() at all before leaving driver.
4216 * This (inexpensive) call *makes sure* device is reset. 4176 * This (inexpensive) call *makes sure* device is reset.
4217 */ 4177 */
4218 iwl_apm_stop(priv); 4178 iwl_legacy_apm_stop(priv);
4219 4179
4220 /* make sure we flush any pending irq or 4180 /* make sure we flush any pending irq or
4221 * tasklet for the driver 4181 * tasklet for the driver
4222 */ 4182 */
4223 spin_lock_irqsave(&priv->lock, flags); 4183 spin_lock_irqsave(&priv->lock, flags);
4224 iwl_disable_interrupts(priv); 4184 iwl_legacy_disable_interrupts(priv);
4225 spin_unlock_irqrestore(&priv->lock, flags); 4185 spin_unlock_irqrestore(&priv->lock, flags);
4226 4186
4227 iwl_synchronize_irq(priv); 4187 iwl3945_synchronize_irq(priv);
4228 4188
4229 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 4189 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4230 4190
@@ -4246,7 +4206,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4246 * until now... */ 4206 * until now... */
4247 destroy_workqueue(priv->workqueue); 4207 destroy_workqueue(priv->workqueue);
4248 priv->workqueue = NULL; 4208 priv->workqueue = NULL;
4249 iwl_free_traffic_mem(priv); 4209 iwl_legacy_free_traffic_mem(priv);
4250 4210
4251 free_irq(pdev->irq, priv); 4211 free_irq(pdev->irq, priv);
4252 pci_disable_msi(pdev); 4212 pci_disable_msi(pdev);
@@ -4256,8 +4216,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4256 pci_disable_device(pdev); 4216 pci_disable_device(pdev);
4257 pci_set_drvdata(pdev, NULL); 4217 pci_set_drvdata(pdev, NULL);
4258 4218
4259 iwl_free_channel_map(priv); 4219 iwl_legacy_free_channel_map(priv);
4260 iwlcore_free_geos(priv); 4220 iwl_legacy_free_geos(priv);
4261 kfree(priv->scan_cmd); 4221 kfree(priv->scan_cmd);
4262 if (priv->beacon_skb) 4222 if (priv->beacon_skb)
4263 dev_kfree_skb(priv->beacon_skb); 4223 dev_kfree_skb(priv->beacon_skb);
@@ -4277,7 +4237,7 @@ static struct pci_driver iwl3945_driver = {
4277 .id_table = iwl3945_hw_card_ids, 4237 .id_table = iwl3945_hw_card_ids,
4278 .probe = iwl3945_pci_probe, 4238 .probe = iwl3945_pci_probe,
4279 .remove = __devexit_p(iwl3945_pci_remove), 4239 .remove = __devexit_p(iwl3945_pci_remove),
4280 .driver.pm = IWL_PM_OPS, 4240 .driver.pm = IWL_LEGACY_PM_OPS,
4281}; 4241};
4282 4242
4283static int __init iwl3945_init(void) 4243static int __init iwl3945_init(void)
@@ -4318,17 +4278,17 @@ module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4318MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 4278MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4319module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); 4279module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
4320MODULE_PARM_DESC(swcrypto, 4280MODULE_PARM_DESC(swcrypto,
4321 "using software crypto (default 1 [software])\n"); 4281 "using software crypto (default 1 [software])");
4322#ifdef CONFIG_IWLWIFI_DEBUG 4282module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4283 int, S_IRUGO);
4284MODULE_PARM_DESC(disable_hw_scan,
4285 "disable hardware scanning (default 0) (deprecated)");
4286#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
4323module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); 4287module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
4324MODULE_PARM_DESC(debug, "debug output mask"); 4288MODULE_PARM_DESC(debug, "debug output mask");
4325#endif 4289#endif
4326module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, 4290module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4327 int, S_IRUGO); 4291MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
4328MODULE_PARM_DESC(disable_hw_scan,
4329 "disable hardware scanning (default 0) (deprecated)");
4330module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4331MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
4332 4292
4333module_exit(iwl3945_exit); 4293module_exit(iwl3945_exit);
4334module_init(iwl3945_init); 4294module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
new file mode 100644
index 000000000000..c0e07685059a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -0,0 +1,3633 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/wireless.h>
44#include <linux/firmware.h>
45#include <linux/etherdevice.h>
46#include <linux/if_arp.h>
47
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl4965"
53
54#include "iwl-eeprom.h"
55#include "iwl-dev.h"
56#include "iwl-core.h"
57#include "iwl-io.h"
58#include "iwl-helpers.h"
59#include "iwl-sta.h"
60#include "iwl-4965-calib.h"
61#include "iwl-4965.h"
62#include "iwl-4965-led.h"
63
64
65/******************************************************************************
66 *
67 * module boiler plate
68 *
69 ******************************************************************************/
70
71/*
72 * module name, copyright, version, etc.
73 */
74#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
75
76#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
77#define VD "d"
78#else
79#define VD
80#endif
81
82#define DRV_VERSION IWLWIFI_VERSION VD
83
84
85MODULE_DESCRIPTION(DRV_DESCRIPTION);
86MODULE_VERSION(DRV_VERSION);
87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
88MODULE_LICENSE("GPL");
89MODULE_ALIAS("iwl4965");
90
91void iwl4965_update_chain_flags(struct iwl_priv *priv)
92{
93 struct iwl_rxon_context *ctx;
94
95 if (priv->cfg->ops->hcmd->set_rxon_chain) {
96 for_each_context(priv, ctx) {
97 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
98 if (ctx->active.rx_chain != ctx->staging.rx_chain)
99 iwl_legacy_commit_rxon(priv, ctx);
100 }
101 }
102}
103
104static void iwl4965_clear_free_frames(struct iwl_priv *priv)
105{
106 struct list_head *element;
107
108 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
109 priv->frames_count);
110
111 while (!list_empty(&priv->free_frames)) {
112 element = priv->free_frames.next;
113 list_del(element);
114 kfree(list_entry(element, struct iwl_frame, list));
115 priv->frames_count--;
116 }
117
118 if (priv->frames_count) {
119 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
120 priv->frames_count);
121 priv->frames_count = 0;
122 }
123}
124
125static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
126{
127 struct iwl_frame *frame;
128 struct list_head *element;
129 if (list_empty(&priv->free_frames)) {
130 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
131 if (!frame) {
132 IWL_ERR(priv, "Could not allocate frame!\n");
133 return NULL;
134 }
135
136 priv->frames_count++;
137 return frame;
138 }
139
140 element = priv->free_frames.next;
141 list_del(element);
142 return list_entry(element, struct iwl_frame, list);
143}
144
145static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
146{
147 memset(frame, 0, sizeof(*frame));
148 list_add(&frame->list, &priv->free_frames);
149}
150
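The three helpers above keep a small pool of pre-allocated beacon frames: take one from the free list if available, otherwise allocate, and return it zeroed when done. As a rough standalone illustration of that pattern, here is a userspace sketch in plain C; the names are hypothetical and a simple singly linked list stands in for the kernel's struct list_head:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct frame {
	struct frame *next;
	char payload[64];
};

static struct frame *free_frames;

static struct frame *get_free_frame(void)
{
	struct frame *f = free_frames;

	if (f) {
		free_frames = f->next;	/* reuse a cached frame */
		return f;
	}
	return calloc(1, sizeof(*f));	/* pool empty: allocate a new one */
}

static void put_free_frame(struct frame *f)
{
	memset(f, 0, sizeof(*f));	/* scrub before returning to the pool */
	f->next = free_frames;
	free_frames = f;
}

int main(void)
{
	struct frame *f = get_free_frame();

	if (!f)
		return 1;
	snprintf(f->payload, sizeof(f->payload), "beacon");
	put_free_frame(f);		/* back on the pool for reuse */
	printf("pool head: %p\n", (void *)free_frames);
	return 0;
}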
151static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
152 struct ieee80211_hdr *hdr,
153 int left)
154{
155 lockdep_assert_held(&priv->mutex);
156
157 if (!priv->beacon_skb)
158 return 0;
159
160 if (priv->beacon_skb->len > left)
161 return 0;
162
163 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
164
165 return priv->beacon_skb->len;
166}
167
168/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
169static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
170 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
171 u8 *beacon, u32 frame_size)
172{
173 u16 tim_idx;
174 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
175
176 /*
177 * The index is relative to frame start but we start looking at the
178 * variable-length part of the beacon.
179 */
180 tim_idx = mgmt->u.beacon.variable - beacon;
181
182 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
183 while ((tim_idx < (frame_size - 2)) &&
184 (beacon[tim_idx] != WLAN_EID_TIM))
185 tim_idx += beacon[tim_idx+1] + 2;
186
187 /* If TIM field was found, set variables */
188 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
189 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
190 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
191 } else
192 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
193}
194
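iwl4965_set_beacon_tim() walks the beacon's variable-length information elements, two bytes of id/length at a time, until it reaches WLAN_EID_TIM. A minimal standalone sketch of that id/len walk, with made-up buffer contents and hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define EID_TIM 5	/* same element id as WLAN_EID_TIM */

static int find_ie(const uint8_t *ies, size_t len, uint8_t eid)
{
	size_t i = 0;

	/* Each element is: 1 byte id, 1 byte length, then 'length' bytes. */
	while (i + 2 <= len) {
		if (ies[i] == eid)
			return (int)i;
		i += 2 + ies[i + 1];
	}
	return -1;
}

int main(void)
{
	/* SSID "ab", supported rates, then a TIM element (contents invented). */
	const uint8_t ies[] = { 0, 2, 'a', 'b',
				1, 1, 0x82,
				EID_TIM, 4, 0, 2, 0, 0 };
	int idx = find_ie(ies, sizeof(ies), EID_TIM);

	if (idx >= 0)
		printf("TIM at offset %d, size %u\n",
		       idx, (unsigned)ies[idx + 1]);
	return 0;
}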
195static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
196 struct iwl_frame *frame)
197{
198 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
199 u32 frame_size;
200 u32 rate_flags;
201 u32 rate;
202 /*
203 * We have to set up the TX command, the TX Beacon command, and the
204 * beacon contents.
205 */
206
207 lockdep_assert_held(&priv->mutex);
208
209 if (!priv->beacon_ctx) {
210 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
211 return 0;
212 }
213
214 /* Initialize memory */
215 tx_beacon_cmd = &frame->u.beacon;
216 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
217
218 /* Set up TX beacon contents */
219 frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
220 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
221 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
222 return 0;
223 if (!frame_size)
224 return 0;
225
226 /* Set up TX command fields */
227 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
228 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
229 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
230 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
231 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
232
233 /* Set up TX beacon command fields */
234 iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
235 frame_size);
236
237 /* Set up packet rate and flags */
238 rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
239 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
240 priv->hw_params.valid_tx_ant);
241 rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
242 if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
243 rate_flags |= RATE_MCS_CCK_MSK;
244 tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
245 rate_flags);
246
247 return sizeof(*tx_beacon_cmd) + frame_size;
248}
249
250int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
251{
252 struct iwl_frame *frame;
253 unsigned int frame_size;
254 int rc;
255
256 frame = iwl4965_get_free_frame(priv);
257 if (!frame) {
258 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
259 "command.\n");
260 return -ENOMEM;
261 }
262
263 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
264 if (!frame_size) {
265 IWL_ERR(priv, "Error configuring the beacon command\n");
266 iwl4965_free_frame(priv, frame);
267 return -EINVAL;
268 }
269
270 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
271 &frame->u.cmd[0]);
272
273 iwl4965_free_frame(priv, frame);
274
275 return rc;
276}
277
278static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
279{
280 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
281
282 dma_addr_t addr = get_unaligned_le32(&tb->lo);
283 if (sizeof(dma_addr_t) > sizeof(u32))
284 addr |=
285 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
286
287 return addr;
288}
289
290static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
291{
292 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
293
294 return le16_to_cpu(tb->hi_n_len) >> 4;
295}
296
297static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
298 dma_addr_t addr, u16 len)
299{
300 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
301 u16 hi_n_len = len << 4;
302
303 put_unaligned_le32(addr, &tb->lo);
304 if (sizeof(dma_addr_t) > sizeof(u32))
305 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
306
307 tb->hi_n_len = cpu_to_le16(hi_n_len);
308
309 tfd->num_tbs = idx + 1;
310}
311
312static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
313{
314 return tfd->num_tbs & 0x1f;
315}
316
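The TFD helpers above split a 36-bit DMA address and a 12-bit length across a 32-bit lo word and a 16-bit hi_n_len field (low nibble: address bits 32-35; upper 12 bits: length). A standalone round-trip sketch of the same packing; the kernel's __le types, put_unaligned_le32() and byte-order handling are deliberately omitted, and the names are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct tb_layout {
	uint32_t lo;		/* low 32 bits of the DMA address */
	uint16_t hi_n_len;	/* bits 0-3: addr bits 32-35, bits 4-15: length */
};

static void tb_pack(struct tb_layout *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t tb_addr(const struct tb_layout *tb)
{
	return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_len(const struct tb_layout *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb_layout tb;

	tb_pack(&tb, 0xF12345678ULL, 2048);	/* 36-bit address, 12-bit length */
	assert(tb_addr(&tb) == 0xF12345678ULL);
	assert(tb_len(&tb) == 2048);
	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)tb_addr(&tb), tb_len(&tb));
	return 0;
}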
317/**
318 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
319 * @priv - driver private data
320 * @txq - tx queue
321 *
322 * Does NOT advance any TFD circular buffer read/write indexes
323 * Does NOT free the TFD itself (which is within circular buffer)
324 */
325void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
326{
327 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
328 struct iwl_tfd *tfd;
329 struct pci_dev *dev = priv->pci_dev;
330 int index = txq->q.read_ptr;
331 int i;
332 int num_tbs;
333
334 tfd = &tfd_tmp[index];
335
336 /* Sanity check on number of chunks */
337 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
338
339 if (num_tbs >= IWL_NUM_OF_TBS) {
340 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
 341		/* @todo issue fatal error, this is quite a serious situation */ 341		/* @todo issue fatal error, this is quite a serious situation */

342 return;
343 }
344
345 /* Unmap tx_cmd */
346 if (num_tbs)
347 pci_unmap_single(dev,
348 dma_unmap_addr(&txq->meta[index], mapping),
349 dma_unmap_len(&txq->meta[index], len),
350 PCI_DMA_BIDIRECTIONAL);
351
352 /* Unmap chunks, if any. */
353 for (i = 1; i < num_tbs; i++)
354 pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
355 iwl4965_tfd_tb_get_len(tfd, i),
356 PCI_DMA_TODEVICE);
357
358 /* free SKB */
359 if (txq->txb) {
360 struct sk_buff *skb;
361
362 skb = txq->txb[txq->q.read_ptr].skb;
363
364 /* can be called from irqs-disabled context */
365 if (skb) {
366 dev_kfree_skb_any(skb);
367 txq->txb[txq->q.read_ptr].skb = NULL;
368 }
369 }
370}
371
372int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
373 struct iwl_tx_queue *txq,
374 dma_addr_t addr, u16 len,
375 u8 reset, u8 pad)
376{
377 struct iwl_queue *q;
378 struct iwl_tfd *tfd, *tfd_tmp;
379 u32 num_tbs;
380
381 q = &txq->q;
382 tfd_tmp = (struct iwl_tfd *)txq->tfds;
383 tfd = &tfd_tmp[q->write_ptr];
384
385 if (reset)
386 memset(tfd, 0, sizeof(*tfd));
387
388 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
389
 390	/* Each TFD can point to a maximum of 20 Tx buffers */
391 if (num_tbs >= IWL_NUM_OF_TBS) {
392 IWL_ERR(priv, "Error can not send more than %d chunks\n",
393 IWL_NUM_OF_TBS);
394 return -EINVAL;
395 }
396
397 BUG_ON(addr & ~DMA_BIT_MASK(36));
398 if (unlikely(addr & ~IWL_TX_DMA_MASK))
399 IWL_ERR(priv, "Unaligned address = %llx\n",
400 (unsigned long long)addr);
401
402 iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
403
404 return 0;
405}
406
407/*
408 * Tell nic where to find circular buffer of Tx Frame Descriptors for
409 * given Tx queue, and enable the DMA channel used for that queue.
410 *
411 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
412 * channels supported in hardware.
413 */
414int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
415 struct iwl_tx_queue *txq)
416{
417 int txq_id = txq->q.id;
418
419 /* Circular buffer (TFD queue in DRAM) physical base address */
420 iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
421 txq->q.dma_addr >> 8);
422
423 return 0;
424}
425
426/******************************************************************************
427 *
428 * Generic RX handler implementations
429 *
430 ******************************************************************************/
431static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
432 struct iwl_rx_mem_buffer *rxb)
433{
434 struct iwl_rx_packet *pkt = rxb_addr(rxb);
435 struct iwl_alive_resp *palive;
436 struct delayed_work *pwork;
437
438 palive = &pkt->u.alive_frame;
439
440 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
441 "0x%01X 0x%01X\n",
442 palive->is_valid, palive->ver_type,
443 palive->ver_subtype);
444
445 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
446 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
447 memcpy(&priv->card_alive_init,
448 &pkt->u.alive_frame,
449 sizeof(struct iwl_init_alive_resp));
450 pwork = &priv->init_alive_start;
451 } else {
452 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
453 memcpy(&priv->card_alive, &pkt->u.alive_frame,
454 sizeof(struct iwl_alive_resp));
455 pwork = &priv->alive_start;
456 }
457
458 /* We delay the ALIVE response by 5ms to
459 * give the HW RF Kill time to activate... */
460 if (palive->is_valid == UCODE_VALID_OK)
461 queue_delayed_work(priv->workqueue, pwork,
462 msecs_to_jiffies(5));
463 else
464 IWL_WARN(priv, "uCode did not respond OK.\n");
465}
466
467/**
468 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
469 *
470 * This callback is provided in order to send a statistics request.
471 *
472 * This timer function is continually reset to execute within
473 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
474 * was received. We need to ensure we receive the statistics in order
475 * to update the temperature used for calibrating the TXPOWER.
476 */
477static void iwl4965_bg_statistics_periodic(unsigned long data)
478{
479 struct iwl_priv *priv = (struct iwl_priv *)data;
480
481 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
482 return;
483
 484	/* don't send host command if rf-kill is on */
485 if (!iwl_legacy_is_ready_rf(priv))
486 return;
487
488 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
489}
490
491
492static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
493 u32 start_idx, u32 num_events,
494 u32 mode)
495{
496 u32 i;
497 u32 ptr; /* SRAM byte address of log data */
498 u32 ev, time, data; /* event log data */
499 unsigned long reg_flags;
500
501 if (mode == 0)
502 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
503 else
504 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
505
506 /* Make sure device is powered up for SRAM reads */
507 spin_lock_irqsave(&priv->reg_lock, reg_flags);
508 if (iwl_grab_nic_access(priv)) {
509 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
510 return;
511 }
512
513 /* Set starting address; reads will auto-increment */
514 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
515 rmb();
516
517 /*
518 * "time" is actually "data" for mode 0 (no timestamp).
519 * place event id # at far right for easier visual parsing.
520 */
521 for (i = 0; i < num_events; i++) {
522 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
523 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
524 if (mode == 0) {
525 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
526 0, time, ev);
527 } else {
528 data = _iwl_legacy_read_direct32(priv,
529 HBUS_TARG_MEM_RDAT);
530 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
531 time, data, ev);
532 }
533 }
534 /* Allow device to power down */
535 iwl_release_nic_access(priv);
536 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
537}
538
539static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
540{
541 u32 capacity; /* event log capacity in # entries */
542 u32 base; /* SRAM byte address of event log header */
543 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
544 u32 num_wraps; /* # times uCode wrapped to top of log */
545 u32 next_entry; /* index of next entry to be written by uCode */
546
547 if (priv->ucode_type == UCODE_INIT)
548 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
549 else
550 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
551 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
552 capacity = iwl_legacy_read_targ_mem(priv, base);
553 num_wraps = iwl_legacy_read_targ_mem(priv,
554 base + (2 * sizeof(u32)));
555 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
556 next_entry = iwl_legacy_read_targ_mem(priv,
557 base + (3 * sizeof(u32)));
558 } else
559 return;
560
561 if (num_wraps == priv->event_log.num_wraps) {
562 iwl4965_print_cont_event_trace(priv,
563 base, priv->event_log.next_entry,
564 next_entry - priv->event_log.next_entry,
565 mode);
566 priv->event_log.non_wraps_count++;
567 } else {
568 if ((num_wraps - priv->event_log.num_wraps) > 1)
569 priv->event_log.wraps_more_count++;
570 else
571 priv->event_log.wraps_once_count++;
572 trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
573 num_wraps - priv->event_log.num_wraps,
574 next_entry, priv->event_log.next_entry);
575 if (next_entry < priv->event_log.next_entry) {
576 iwl4965_print_cont_event_trace(priv, base,
577 priv->event_log.next_entry,
578 capacity - priv->event_log.next_entry,
579 mode);
580
581 iwl4965_print_cont_event_trace(priv, base, 0,
582 next_entry, mode);
583 } else {
584 iwl4965_print_cont_event_trace(priv, base,
585 next_entry, capacity - next_entry,
586 mode);
587
588 iwl4965_print_cont_event_trace(priv, base, 0,
589 next_entry, mode);
590 }
591 }
592 priv->event_log.num_wraps = num_wraps;
593 priv->event_log.next_entry = next_entry;
594}
595
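When the uCode's write index has wrapped past the driver's last read position, iwl4965_continuous_event_trace() dumps the unread portion of the circular log in two pieces: the tail of the buffer first, then the wrapped-around head. A standalone sketch of that wrap handling, with an illustrative eight-entry log:

#include <stdio.h>

#define LOG_CAPACITY 8

static void dump_range(const int *log, int start, int count)
{
	for (int i = 0; i < count; i++)
		printf("event %d\n", log[start + i]);
}

static void dump_new_events(const int *log, int old_next, int new_next,
			    int wrapped)
{
	if (!wrapped) {
		dump_range(log, old_next, new_next - old_next);
	} else if (new_next < old_next) {
		/* tail of the buffer, then the wrapped-around head */
		dump_range(log, old_next, LOG_CAPACITY - old_next);
		dump_range(log, 0, new_next);
	} else {
		/* wrapped past the old position: dump a whole buffer's worth */
		dump_range(log, new_next, LOG_CAPACITY - new_next);
		dump_range(log, 0, new_next);
	}
}

int main(void)
{
	int log[LOG_CAPACITY] = { 10, 11, 12, 13, 14, 15, 16, 17 };

	dump_new_events(log, 6, 2, 1);	/* old index 6, new index 2, wrapped */
	return 0;
}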
596/**
597 * iwl4965_bg_ucode_trace - Timer callback to log ucode event
598 *
599 * The timer is continually set to execute every
600 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
601 * this function is to perform continuous uCode event logging operation
602 * if enabled
603 */
604static void iwl4965_bg_ucode_trace(unsigned long data)
605{
606 struct iwl_priv *priv = (struct iwl_priv *)data;
607
608 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
609 return;
610
611 if (priv->event_log.ucode_trace) {
612 iwl4965_continuous_event_trace(priv);
613 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
614 mod_timer(&priv->ucode_trace,
615 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
616 }
617}
618
619static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
620 struct iwl_rx_mem_buffer *rxb)
621{
622 struct iwl_rx_packet *pkt = rxb_addr(rxb);
623 struct iwl4965_beacon_notif *beacon =
624 (struct iwl4965_beacon_notif *)pkt->u.raw;
625#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
626 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
627
628 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
629 "tsf %d %d rate %d\n",
630 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
631 beacon->beacon_notify_hdr.failure_frame,
632 le32_to_cpu(beacon->ibss_mgr_status),
633 le32_to_cpu(beacon->high_tsf),
634 le32_to_cpu(beacon->low_tsf), rate);
635#endif
636
637 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
638}
639
640static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
641{
642 unsigned long flags;
643
644 IWL_DEBUG_POWER(priv, "Stop all queues\n");
645
646 if (priv->mac80211_registered)
647 ieee80211_stop_queues(priv->hw);
648
649 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
650 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
651 iwl_read32(priv, CSR_UCODE_DRV_GP1);
652
653 spin_lock_irqsave(&priv->reg_lock, flags);
654 if (!iwl_grab_nic_access(priv))
655 iwl_release_nic_access(priv);
656 spin_unlock_irqrestore(&priv->reg_lock, flags);
657}
658
659/* Handle notification from uCode that card's power state is changing
660 * due to software, hardware, or critical temperature RFKILL */
661static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
662 struct iwl_rx_mem_buffer *rxb)
663{
664 struct iwl_rx_packet *pkt = rxb_addr(rxb);
665 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
666 unsigned long status = priv->status;
667
668 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
669 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
670 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
671 (flags & CT_CARD_DISABLED) ?
672 "Reached" : "Not reached");
673
674 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
675 CT_CARD_DISABLED)) {
676
677 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
678 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
679
680 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
681 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
682
683 if (!(flags & RXON_CARD_DISABLED)) {
684 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
685 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
686 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
687 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
688 }
689 }
690
691 if (flags & CT_CARD_DISABLED)
692 iwl4965_perform_ct_kill_task(priv);
693
694 if (flags & HW_CARD_DISABLED)
695 set_bit(STATUS_RF_KILL_HW, &priv->status);
696 else
697 clear_bit(STATUS_RF_KILL_HW, &priv->status);
698
699 if (!(flags & RXON_CARD_DISABLED))
700 iwl_legacy_scan_cancel(priv);
701
702 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
703 test_bit(STATUS_RF_KILL_HW, &priv->status)))
704 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
705 test_bit(STATUS_RF_KILL_HW, &priv->status));
706 else
707 wake_up_interruptible(&priv->wait_command_queue);
708}
709
710/**
711 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
712 *
713 * Setup the RX handlers for each of the reply types sent from the uCode
714 * to the host.
715 *
716 * This function chains into the hardware specific files for them to setup
717 * any hardware specific handlers as well.
718 */
719static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
720{
721 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
722 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
723 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
724 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
725 iwl_legacy_rx_spectrum_measure_notif;
726 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
727 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
728 iwl_legacy_rx_pm_debug_statistics_notif;
729 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
730
731 /*
732 * The same handler is used for both the REPLY to a discrete
733 * statistics request from the host as well as for the periodic
734 * statistics notifications (after received beacons) from the uCode.
735 */
736 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
737 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
738
739 iwl_legacy_setup_rx_scan_handlers(priv);
740
741 /* status change handler */
742 priv->rx_handlers[CARD_STATE_NOTIFICATION] =
743 iwl4965_rx_card_state_notif;
744
745 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
746 iwl4965_rx_missed_beacon_notif;
747 /* Rx handlers */
748 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
749 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
750 /* block ack */
751 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
752 /* Set up hardware specific Rx handlers */
753 priv->cfg->ops->lib->rx_handler_setup(priv);
754}
755
756/**
757 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
758 *
759 * Uses the priv->rx_handlers callback function array to invoke
760 * the appropriate handlers, including command responses,
761 * frame-received notifications, and other notifications.
762 */
763void iwl4965_rx_handle(struct iwl_priv *priv)
764{
765 struct iwl_rx_mem_buffer *rxb;
766 struct iwl_rx_packet *pkt;
767 struct iwl_rx_queue *rxq = &priv->rxq;
768 u32 r, i;
769 int reclaim;
770 unsigned long flags;
771 u8 fill_rx = 0;
772 u32 count = 8;
773 int total_empty;
774
775 /* uCode's read index (stored in shared DRAM) indicates the last Rx
776 * buffer that the driver may process (last buffer filled by ucode). */
777 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
778 i = rxq->read;
779
780 /* Rx interrupt, but nothing sent from uCode */
781 if (i == r)
782 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
783
784 /* calculate total frames need to be restock after handling RX */
785 total_empty = r - rxq->write_actual;
786 if (total_empty < 0)
787 total_empty += RX_QUEUE_SIZE;
788
789 if (total_empty > (RX_QUEUE_SIZE / 2))
790 fill_rx = 1;
791
792 while (i != r) {
793 int len;
794
795 rxb = rxq->queue[i];
796
797 /* If an RXB doesn't have a Rx queue slot associated with it,
798 * then a bug has been introduced in the queue refilling
799 * routines -- catch it here */
800 BUG_ON(rxb == NULL);
801
802 rxq->queue[i] = NULL;
803
804 pci_unmap_page(priv->pci_dev, rxb->page_dma,
805 PAGE_SIZE << priv->hw_params.rx_page_order,
806 PCI_DMA_FROMDEVICE);
807 pkt = rxb_addr(rxb);
808
809 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
810 len += sizeof(u32); /* account for status word */
811 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
812
813 /* Reclaim a command buffer only if this packet is a response
814 * to a (driver-originated) command.
815 * If the packet (e.g. Rx frame) originated from uCode,
816 * there is no command buffer to reclaim.
817 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
818 * but apparently a few don't get set; catch them here. */
819 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
820 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
821 (pkt->hdr.cmd != REPLY_RX) &&
822 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
823 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
824 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
825 (pkt->hdr.cmd != REPLY_TX);
826
827 /* Based on type of command response or notification,
828 * handle those that need handling via function in
829 * rx_handlers table. See iwl4965_setup_rx_handlers() */
830 if (priv->rx_handlers[pkt->hdr.cmd]) {
831 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
832 i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
833 pkt->hdr.cmd);
834 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
835 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
836 } else {
837 /* No handling needed */
838 IWL_DEBUG_RX(priv,
839 "r %d i %d No handler needed for %s, 0x%02x\n",
840 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
841 pkt->hdr.cmd);
842 }
843
844 /*
845 * XXX: After here, we should always check rxb->page
846 * against NULL before touching it or its virtual
847 * memory (pkt). Because some rx_handler might have
848 * already taken or freed the pages.
849 */
850
851 if (reclaim) {
852 /* Invoke any callbacks, transfer the buffer to caller,
853 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
854 * as we reclaim the driver command queue */
855 if (rxb->page)
856 iwl_legacy_tx_cmd_complete(priv, rxb);
857 else
858 IWL_WARN(priv, "Claim null rxb?\n");
859 }
860
861 /* Reuse the page if possible. For notification packets and
862 * SKBs that fail to Rx correctly, add them back into the
863 * rx_free list for reuse later. */
864 spin_lock_irqsave(&rxq->lock, flags);
865 if (rxb->page != NULL) {
866 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
867 0, PAGE_SIZE << priv->hw_params.rx_page_order,
868 PCI_DMA_FROMDEVICE);
869 list_add_tail(&rxb->list, &rxq->rx_free);
870 rxq->free_count++;
871 } else
872 list_add_tail(&rxb->list, &rxq->rx_used);
873
874 spin_unlock_irqrestore(&rxq->lock, flags);
875
876 i = (i + 1) & RX_QUEUE_MASK;
877 /* If there are a lot of unused frames,
 878		 * restock the Rx queue so ucode won't assert. */
879 if (fill_rx) {
880 count++;
881 if (count >= 8) {
882 rxq->read = i;
883 iwl4965_rx_replenish_now(priv);
884 count = 0;
885 }
886 }
887 }
888
889 /* Backtrack one entry */
890 rxq->read = i;
891 if (fill_rx)
892 iwl4965_rx_replenish_now(priv);
893 else
894 iwl4965_rx_queue_restock(priv);
895}
896
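The loop above advances the driver's index toward the hardware's closed_rb_num with power-of-two wraparound, and the restock decision depends on how far ahead the hardware has gotten. A standalone sketch of that ring-index arithmetic; the queue size and values are illustrative, and the real driver compares against write_actual rather than the read index:

#include <stdio.h>

#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

static int rx_pending(int hw_closed, int sw_idx)
{
	int pending = hw_closed - sw_idx;

	if (pending < 0)		/* hardware index wrapped past ours */
		pending += RX_QUEUE_SIZE;
	return pending;
}

int main(void)
{
	int i = 250;			/* driver index */
	int r = 4;			/* hardware closed index (wrapped) */

	printf("pending buffers: %d\n", rx_pending(r, i));

	while (i != r) {
		/* ... handle rx buffer i ... */
		i = (i + 1) & RX_QUEUE_MASK;	/* same wrap as the driver */
	}
	printf("caught up at index %d\n", i);
	return 0;
}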
897/* call this function to flush any scheduled tasklet */
898static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
899{
 900	/* wait to make sure we flush pending tasklet */
901 synchronize_irq(priv->pci_dev->irq);
902 tasklet_kill(&priv->irq_tasklet);
903}
904
905static void iwl4965_irq_tasklet(struct iwl_priv *priv)
906{
907 u32 inta, handled = 0;
908 u32 inta_fh;
909 unsigned long flags;
910 u32 i;
911#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
912 u32 inta_mask;
913#endif
914
915 spin_lock_irqsave(&priv->lock, flags);
916
917 /* Ack/clear/reset pending uCode interrupts.
918 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
919 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
920 inta = iwl_read32(priv, CSR_INT);
921 iwl_write32(priv, CSR_INT, inta);
922
923 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
924 * Any new interrupts that happen after this, either while we're
925 * in this tasklet, or later, will show up in next ISR/tasklet. */
926 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
927 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
928
929#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
930 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
931 /* just for debug */
932 inta_mask = iwl_read32(priv, CSR_INT_MASK);
933 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
934 inta, inta_mask, inta_fh);
935 }
936#endif
937
938 spin_unlock_irqrestore(&priv->lock, flags);
939
940 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
941 * atomic, make sure that inta covers all the interrupts that
942 * we've discovered, even if FH interrupt came in just after
943 * reading CSR_INT. */
944 if (inta_fh & CSR49_FH_INT_RX_MASK)
945 inta |= CSR_INT_BIT_FH_RX;
946 if (inta_fh & CSR49_FH_INT_TX_MASK)
947 inta |= CSR_INT_BIT_FH_TX;
948
949 /* Now service all interrupt bits discovered above. */
950 if (inta & CSR_INT_BIT_HW_ERR) {
951 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
952
953 /* Tell the device to stop sending interrupts */
954 iwl_legacy_disable_interrupts(priv);
955
956 priv->isr_stats.hw++;
957 iwl_legacy_irq_handle_error(priv);
958
959 handled |= CSR_INT_BIT_HW_ERR;
960
961 return;
962 }
963
964#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
965 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
966 /* NIC fires this, but we don't use it, redundant with WAKEUP */
967 if (inta & CSR_INT_BIT_SCD) {
968 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
969 "the frame/frames.\n");
970 priv->isr_stats.sch++;
971 }
972
973 /* Alive notification via Rx interrupt will do the real work */
974 if (inta & CSR_INT_BIT_ALIVE) {
975 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
976 priv->isr_stats.alive++;
977 }
978 }
979#endif
980 /* Safely ignore these bits for debug checks below */
981 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
982
983 /* HW RF KILL switch toggled */
984 if (inta & CSR_INT_BIT_RF_KILL) {
985 int hw_rf_kill = 0;
986 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
987 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
988 hw_rf_kill = 1;
989
990 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
991 hw_rf_kill ? "disable radio" : "enable radio");
992
993 priv->isr_stats.rfkill++;
994
995 /* driver only loads ucode once setting the interface up.
996 * the driver allows loading the ucode even if the radio
997 * is killed. Hence update the killswitch state here. The
998 * rfkill handler will care about restarting if needed.
999 */
1000 if (!test_bit(STATUS_ALIVE, &priv->status)) {
1001 if (hw_rf_kill)
1002 set_bit(STATUS_RF_KILL_HW, &priv->status);
1003 else
1004 clear_bit(STATUS_RF_KILL_HW, &priv->status);
1005 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
1006 }
1007
1008 handled |= CSR_INT_BIT_RF_KILL;
1009 }
1010
1011 /* Chip got too hot and stopped itself */
1012 if (inta & CSR_INT_BIT_CT_KILL) {
1013 IWL_ERR(priv, "Microcode CT kill error detected.\n");
1014 priv->isr_stats.ctkill++;
1015 handled |= CSR_INT_BIT_CT_KILL;
1016 }
1017
1018 /* Error detected by uCode */
1019 if (inta & CSR_INT_BIT_SW_ERR) {
1020 IWL_ERR(priv, "Microcode SW error detected. "
1021 " Restarting 0x%X.\n", inta);
1022 priv->isr_stats.sw++;
1023 iwl_legacy_irq_handle_error(priv);
1024 handled |= CSR_INT_BIT_SW_ERR;
1025 }
1026
1027 /*
1028 * uCode wakes up after power-down sleep.
1029 * Tell device about any new tx or host commands enqueued,
1030 * and about any Rx buffers made available while asleep.
1031 */
1032 if (inta & CSR_INT_BIT_WAKEUP) {
1033 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1034 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1035 for (i = 0; i < priv->hw_params.max_txq_num; i++)
1036 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
1037 priv->isr_stats.wakeup++;
1038 handled |= CSR_INT_BIT_WAKEUP;
1039 }
1040
1041 /* All uCode command responses, including Tx command responses,
1042 * Rx "responses" (frame-received notification), and other
 1043	 * notifications from uCode come through here */
1044 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1045 iwl4965_rx_handle(priv);
1046 priv->isr_stats.rx++;
1047 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1048 }
1049
1050 /* This "Tx" DMA channel is used only for loading uCode */
1051 if (inta & CSR_INT_BIT_FH_TX) {
1052 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
1053 priv->isr_stats.tx++;
1054 handled |= CSR_INT_BIT_FH_TX;
1055 /* Wake up uCode load routine, now that load is complete */
1056 priv->ucode_write_complete = 1;
1057 wake_up_interruptible(&priv->wait_command_queue);
1058 }
1059
1060 if (inta & ~handled) {
1061 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1062 priv->isr_stats.unhandled++;
1063 }
1064
1065 if (inta & ~(priv->inta_mask)) {
1066 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1067 inta & ~priv->inta_mask);
1068 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
1069 }
1070
1071 /* Re-enable all interrupts */
 1072	/* only re-enable if disabled by irq */
1073 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1074 iwl_legacy_enable_interrupts(priv);
1075
1076#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1077 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1078 inta = iwl_read32(priv, CSR_INT);
1079 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1080 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1081 IWL_DEBUG_ISR(priv,
1082 "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1083 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1084 }
1085#endif
1086}
1087
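Each interrupt cause the tasklet services is OR-ed into 'handled', and whatever remains in inta afterwards is reported as unhandled. A standalone sketch of that bookkeeping; the bit values below are placeholders, not the real CSR_INT_BIT_* definitions:

#include <stdio.h>
#include <stdint.h>

#define INT_BIT_HW_ERR	(1u << 29)
#define INT_BIT_RF_KILL	(1u << 7)
#define INT_BIT_WAKEUP	(1u << 1)

int main(void)
{
	uint32_t inta = INT_BIT_RF_KILL | INT_BIT_WAKEUP | (1u << 12);
	uint32_t handled = 0;

	if (inta & INT_BIT_HW_ERR)
		handled |= INT_BIT_HW_ERR;	/* would restart the device */
	if (inta & INT_BIT_RF_KILL)
		handled |= INT_BIT_RF_KILL;	/* update rfkill state */
	if (inta & INT_BIT_WAKEUP)
		handled |= INT_BIT_WAKEUP;	/* kick queue write pointers */

	if (inta & ~handled)
		printf("Unhandled INTA bits 0x%08x\n",
		       (unsigned)(inta & ~handled));
	return 0;
}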
1088/*****************************************************************************
1089 *
1090 * sysfs attributes
1091 *
1092 *****************************************************************************/
1093
1094#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1095
1096/*
1097 * The following adds a new attribute to the sysfs representation
1098 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
1099 * used for controlling the debug level.
1100 *
1101 * See the level definitions in iwl for details.
1102 *
1103 * The debug_level being managed using sysfs below is a per device debug
1104 * level that is used instead of the global debug level if it (the per
1105 * device debug level) is set.
1106 */
1107static ssize_t iwl4965_show_debug_level(struct device *d,
1108 struct device_attribute *attr, char *buf)
1109{
1110 struct iwl_priv *priv = dev_get_drvdata(d);
1111 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
1112}
1113static ssize_t iwl4965_store_debug_level(struct device *d,
1114 struct device_attribute *attr,
1115 const char *buf, size_t count)
1116{
1117 struct iwl_priv *priv = dev_get_drvdata(d);
1118 unsigned long val;
1119 int ret;
1120
1121 ret = strict_strtoul(buf, 0, &val);
1122 if (ret)
1123 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
1124 else {
1125 priv->debug_level = val;
1126 if (iwl_legacy_alloc_traffic_mem(priv))
1127 IWL_ERR(priv,
1128 "Not enough memory to generate traffic log\n");
1129 }
1130 return strnlen(buf, count);
1131}
1132
1133static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
1134 iwl4965_show_debug_level, iwl4965_store_debug_level);
1135
1136
1137#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1138
1139
1140static ssize_t iwl4965_show_temperature(struct device *d,
1141 struct device_attribute *attr, char *buf)
1142{
1143 struct iwl_priv *priv = dev_get_drvdata(d);
1144
1145 if (!iwl_legacy_is_alive(priv))
1146 return -EAGAIN;
1147
1148 return sprintf(buf, "%d\n", priv->temperature);
1149}
1150
1151static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
1152
1153static ssize_t iwl4965_show_tx_power(struct device *d,
1154 struct device_attribute *attr, char *buf)
1155{
1156 struct iwl_priv *priv = dev_get_drvdata(d);
1157
1158 if (!iwl_legacy_is_ready_rf(priv))
1159 return sprintf(buf, "off\n");
1160 else
1161 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
1162}
1163
1164static ssize_t iwl4965_store_tx_power(struct device *d,
1165 struct device_attribute *attr,
1166 const char *buf, size_t count)
1167{
1168 struct iwl_priv *priv = dev_get_drvdata(d);
1169 unsigned long val;
1170 int ret;
1171
1172 ret = strict_strtoul(buf, 10, &val);
1173 if (ret)
1174 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
1175 else {
1176 ret = iwl_legacy_set_tx_power(priv, val, false);
1177 if (ret)
1178 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
1179 ret);
1180 else
1181 ret = count;
1182 }
1183 return ret;
1184}
1185
1186static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
1187 iwl4965_show_tx_power, iwl4965_store_tx_power);
1188
1189static struct attribute *iwl_sysfs_entries[] = {
1190 &dev_attr_temperature.attr,
1191 &dev_attr_tx_power.attr,
1192#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1193 &dev_attr_debug_level.attr,
1194#endif
1195 NULL
1196};
1197
1198static struct attribute_group iwl_attribute_group = {
1199 .name = NULL, /* put in device directory */
1200 .attrs = iwl_sysfs_entries,
1201};
1202
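Once the attribute group is registered, the temperature and tx_power files appear under the device's sysfs directory. A small userspace sketch of reading one of them; the PCI address in the path is made up and will differ per system:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/temperature", "r");
	int temp;

	if (f && fscanf(f, "%d", &temp) == 1)
		printf("4965 temperature: %d\n", temp);
	if (f)
		fclose(f);
	return 0;
}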
1203/******************************************************************************
1204 *
1205 * uCode download functions
1206 *
1207 ******************************************************************************/
1208
1209static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
1210{
1211 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1212 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1213 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1214 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1215 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1216 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1217}
1218
1219static void iwl4965_nic_start(struct iwl_priv *priv)
1220{
1221 /* Remove all resets to allow NIC to operate */
1222 iwl_write32(priv, CSR_RESET, 0);
1223}
1224
1225static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
1226 void *context);
1227static int iwl4965_mac_setup_register(struct iwl_priv *priv,
1228 u32 max_probe_length);
1229
1230static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
1231{
1232 const char *name_pre = priv->cfg->fw_name_pre;
1233 char tag[8];
1234
1235 if (first) {
1236 priv->fw_index = priv->cfg->ucode_api_max;
1237 sprintf(tag, "%d", priv->fw_index);
1238 } else {
1239 priv->fw_index--;
1240 sprintf(tag, "%d", priv->fw_index);
1241 }
1242
1243 if (priv->fw_index < priv->cfg->ucode_api_min) {
1244 IWL_ERR(priv, "no suitable firmware found!\n");
1245 return -ENOENT;
1246 }
1247
1248 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1249
1250 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
1251 priv->firmware_name);
1252
1253 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1254 &priv->pci_dev->dev, GFP_KERNEL, priv,
1255 iwl4965_ucode_callback);
1256}
1257
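iwl4965_request_firmware() starts at ucode_api_max and, each time a load fails, retries with the next lower API number until ucode_api_min is passed. A standalone sketch of that fallback loop; the synchronous fw_available() check is a stand-in for the real asynchronous request_firmware_nowait() call, and the file names are approximations:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define API_MAX 2
#define API_MIN 1

static bool fw_available(const char *name)
{
	/* pretend only the API v1 image is installed */
	return strstr(name, "-1.ucode") != NULL;
}

int main(void)
{
	char name[64];

	for (int api = API_MAX; api >= API_MIN; api--) {
		snprintf(name, sizeof(name), "iwlwifi-4965-%d.ucode", api);
		printf("trying '%s'\n", name);
		if (fw_available(name)) {
			printf("loaded '%s'\n", name);
			return 0;
		}
	}
	fprintf(stderr, "no suitable firmware found!\n");
	return 1;
}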
1258struct iwl4965_firmware_pieces {
1259 const void *inst, *data, *init, *init_data, *boot;
1260 size_t inst_size, data_size, init_size, init_data_size, boot_size;
1261};
1262
1263static int iwl4965_load_firmware(struct iwl_priv *priv,
1264 const struct firmware *ucode_raw,
1265 struct iwl4965_firmware_pieces *pieces)
1266{
1267 struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
1268 u32 api_ver, hdr_size;
1269 const u8 *src;
1270
1271 priv->ucode_ver = le32_to_cpu(ucode->ver);
1272 api_ver = IWL_UCODE_API(priv->ucode_ver);
1273
1274 switch (api_ver) {
1275 default:
1276 case 0:
1277 case 1:
1278 case 2:
1279 hdr_size = 24;
1280 if (ucode_raw->size < hdr_size) {
1281 IWL_ERR(priv, "File size too small!\n");
1282 return -EINVAL;
1283 }
1284 pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
1285 pieces->data_size = le32_to_cpu(ucode->v1.data_size);
1286 pieces->init_size = le32_to_cpu(ucode->v1.init_size);
1287 pieces->init_data_size =
1288 le32_to_cpu(ucode->v1.init_data_size);
1289 pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
1290 src = ucode->v1.data;
1291 break;
1292 }
1293
1294 /* Verify size of file vs. image size info in file's header */
1295 if (ucode_raw->size != hdr_size + pieces->inst_size +
1296 pieces->data_size + pieces->init_size +
1297 pieces->init_data_size + pieces->boot_size) {
1298
1299 IWL_ERR(priv,
1300 "uCode file size %d does not match expected size\n",
1301 (int)ucode_raw->size);
1302 return -EINVAL;
1303 }
1304
1305 pieces->inst = src;
1306 src += pieces->inst_size;
1307 pieces->data = src;
1308 src += pieces->data_size;
1309 pieces->init = src;
1310 src += pieces->init_size;
1311 pieces->init_data = src;
1312 src += pieces->init_data_size;
1313 pieces->boot = src;
1314 src += pieces->boot_size;
1315
1316 return 0;
1317}
1318
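iwl4965_load_firmware() accepts the file only if its size is exactly the 24-byte v1/v2 header plus the five section sizes the header declares, after which the sections are sliced out back to back. A standalone sketch of that size check, with made-up section sizes:

#include <stdio.h>
#include <stdint.h>

struct fw_header_v1 {		/* layout mirrors the old five-section header */
	uint32_t ver;
	uint32_t inst_size;
	uint32_t data_size;
	uint32_t init_size;
	uint32_t init_data_size;
	uint32_t boot_size;
};

int main(void)
{
	struct fw_header_v1 hdr = {
		.ver = 0,
		.inst_size = 1024, .data_size = 512,
		.init_size = 256, .init_data_size = 128, .boot_size = 64,
	};
	size_t file_size = sizeof(hdr) + 1024 + 512 + 256 + 128 + 64;
	size_t expected = sizeof(hdr) + hdr.inst_size + hdr.data_size +
			  hdr.init_size + hdr.init_data_size + hdr.boot_size;

	if (file_size != expected) {
		fprintf(stderr,
			"uCode file size %zu does not match expected %zu\n",
			file_size, expected);
		return 1;
	}
	printf("header OK: inst at offset %zu, data at %zu\n",
	       sizeof(hdr), sizeof(hdr) + hdr.inst_size);
	return 0;
}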
1319/**
1320 * iwl4965_ucode_callback - callback when firmware was loaded
1321 *
1322 * If loaded successfully, copies the firmware into buffers
1323 * for the card to fetch (via DMA).
1324 */
1325static void
1326iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
1327{
1328 struct iwl_priv *priv = context;
1329 struct iwl_ucode_header *ucode;
1330 int err;
1331 struct iwl4965_firmware_pieces pieces;
1332 const unsigned int api_max = priv->cfg->ucode_api_max;
1333 const unsigned int api_min = priv->cfg->ucode_api_min;
1334 u32 api_ver;
1335
1336 u32 max_probe_length = 200;
1337 u32 standard_phy_calibration_size =
1338 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1339
1340 memset(&pieces, 0, sizeof(pieces));
1341
1342 if (!ucode_raw) {
1343 if (priv->fw_index <= priv->cfg->ucode_api_max)
1344 IWL_ERR(priv,
1345 "request for firmware file '%s' failed.\n",
1346 priv->firmware_name);
1347 goto try_again;
1348 }
1349
1350 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1351 priv->firmware_name, ucode_raw->size);
1352
1353 /* Make sure that we got at least the API version number */
1354 if (ucode_raw->size < 4) {
1355 IWL_ERR(priv, "File size way too small!\n");
1356 goto try_again;
1357 }
1358
1359 /* Data from ucode file: header followed by uCode images */
1360 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1361
1362 err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
1363
1364 if (err)
1365 goto try_again;
1366
1367 api_ver = IWL_UCODE_API(priv->ucode_ver);
1368
1369 /*
1370 * api_ver should match the api version forming part of the
1371 * firmware filename ... but we don't check for that and only rely
1372 * on the API version read from firmware header from here on forward
1373 */
1374 if (api_ver < api_min || api_ver > api_max) {
1375 IWL_ERR(priv,
1376 "Driver unable to support your firmware API. "
1377 "Driver supports v%u, firmware is v%u.\n",
1378 api_max, api_ver);
1379 goto try_again;
1380 }
1381
1382 if (api_ver != api_max)
1383 IWL_ERR(priv,
1384 "Firmware has old API version. Expected v%u, "
1385 "got v%u. New firmware can be obtained "
1386 "from http://www.intellinuxwireless.org.\n",
1387 api_max, api_ver);
1388
1389 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1390 IWL_UCODE_MAJOR(priv->ucode_ver),
1391 IWL_UCODE_MINOR(priv->ucode_ver),
1392 IWL_UCODE_API(priv->ucode_ver),
1393 IWL_UCODE_SERIAL(priv->ucode_ver));
1394
1395 snprintf(priv->hw->wiphy->fw_version,
1396 sizeof(priv->hw->wiphy->fw_version),
1397 "%u.%u.%u.%u",
1398 IWL_UCODE_MAJOR(priv->ucode_ver),
1399 IWL_UCODE_MINOR(priv->ucode_ver),
1400 IWL_UCODE_API(priv->ucode_ver),
1401 IWL_UCODE_SERIAL(priv->ucode_ver));
1402
1403 /*
1404 * For any of the failures below (before allocating pci memory)
1405 * we will try to load a version with a smaller API -- maybe the
1406 * user just got a corrupted version of the latest API.
1407 */
1408
1409 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1410 priv->ucode_ver);
1411 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1412 pieces.inst_size);
1413 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1414 pieces.data_size);
1415 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1416 pieces.init_size);
1417 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1418 pieces.init_data_size);
1419 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
1420 pieces.boot_size);
1421
1422 /* Verify that uCode images will fit in card's SRAM */
1423 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1424 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1425 pieces.inst_size);
1426 goto try_again;
1427 }
1428
1429 if (pieces.data_size > priv->hw_params.max_data_size) {
1430 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1431 pieces.data_size);
1432 goto try_again;
1433 }
1434
1435 if (pieces.init_size > priv->hw_params.max_inst_size) {
1436 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1437 pieces.init_size);
1438 goto try_again;
1439 }
1440
1441 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1442 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1443 pieces.init_data_size);
1444 goto try_again;
1445 }
1446
1447 if (pieces.boot_size > priv->hw_params.max_bsm_size) {
1448 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
1449 pieces.boot_size);
1450 goto try_again;
1451 }
1452
1453 /* Allocate ucode buffers for card's bus-master loading ... */
1454
1455 /* Runtime instructions and 2 copies of data:
1456 * 1) unmodified from disk
1457 * 2) backup cache for save/restore during power-downs */
1458 priv->ucode_code.len = pieces.inst_size;
1459 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
1460
1461 priv->ucode_data.len = pieces.data_size;
1462 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
1463
1464 priv->ucode_data_backup.len = pieces.data_size;
1465 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1466
1467 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
1468 !priv->ucode_data_backup.v_addr)
1469 goto err_pci_alloc;
1470
1471 /* Initialization instructions and data */
1472 if (pieces.init_size && pieces.init_data_size) {
1473 priv->ucode_init.len = pieces.init_size;
1474 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
1475
1476 priv->ucode_init_data.len = pieces.init_data_size;
1477 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1478
1479 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
1480 goto err_pci_alloc;
1481 }
1482
1483 /* Bootstrap (instructions only, no data) */
1484 if (pieces.boot_size) {
1485 priv->ucode_boot.len = pieces.boot_size;
1486 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
1487
1488 if (!priv->ucode_boot.v_addr)
1489 goto err_pci_alloc;
1490 }
1491
1492 /* Now that we can no longer fail, copy information */
1493
1494 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1495
1496 /* Copy images into buffers for card's bus-master reads ... */
1497
1498 /* Runtime instructions (first block of data in file) */
1499 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
1500 pieces.inst_size);
1501 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
1502
1503 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1504 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1505
1506 /*
1507 * Runtime data
1508 * NOTE: Copy into backup buffer will be done in iwl_up()
1509 */
1510 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
1511 pieces.data_size);
1512 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
1513 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
1514
1515 /* Initialization instructions */
1516 if (pieces.init_size) {
1517 IWL_DEBUG_INFO(priv,
1518 "Copying (but not loading) init instr len %Zd\n",
1519 pieces.init_size);
1520 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
1521 }
1522
1523 /* Initialization data */
1524 if (pieces.init_data_size) {
1525 IWL_DEBUG_INFO(priv,
1526 "Copying (but not loading) init data len %Zd\n",
1527 pieces.init_data_size);
1528 memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
1529 pieces.init_data_size);
1530 }
1531
1532 /* Bootstrap instructions */
1533 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
1534 pieces.boot_size);
1535 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
1536
1537 /*
1538 * figure out the offset of chain noise reset and gain commands
1539 * base on the size of standard phy calibration commands table size
1540 */
1541 priv->_4965.phy_calib_chain_noise_reset_cmd =
1542 standard_phy_calibration_size;
1543 priv->_4965.phy_calib_chain_noise_gain_cmd =
1544 standard_phy_calibration_size + 1;
1545
1546 /**************************************************
1547 * This is still part of probe() in a sense...
1548 *
1549 * 9. Setup and register with mac80211 and debugfs
1550 **************************************************/
1551 err = iwl4965_mac_setup_register(priv, max_probe_length);
1552 if (err)
1553 goto out_unbind;
1554
1555 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
1556 if (err)
1557 IWL_ERR(priv,
1558 "failed to create debugfs files. Ignoring error: %d\n", err);
1559
1560 err = sysfs_create_group(&priv->pci_dev->dev.kobj,
1561 &iwl_attribute_group);
1562 if (err) {
1563 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1564 goto out_unbind;
1565 }
1566
1567 /* We have our copies now, allow OS release its copies */
1568 release_firmware(ucode_raw);
1569 complete(&priv->_4965.firmware_loading_complete);
1570 return;
1571
1572 try_again:
1573 /* try next, if any */
1574 if (iwl4965_request_firmware(priv, false))
1575 goto out_unbind;
1576 release_firmware(ucode_raw);
1577 return;
1578
1579 err_pci_alloc:
1580 IWL_ERR(priv, "failed to allocate pci memory\n");
1581 iwl4965_dealloc_ucode_pci(priv);
1582 out_unbind:
1583 complete(&priv->_4965.firmware_loading_complete);
1584 device_release_driver(&priv->pci_dev->dev);
1585 release_firmware(ucode_raw);
1586}
1587
1588static const char * const desc_lookup_text[] = {
1589 "OK",
1590 "FAIL",
1591 "BAD_PARAM",
1592 "BAD_CHECKSUM",
1593 "NMI_INTERRUPT_WDG",
1594 "SYSASSERT",
1595 "FATAL_ERROR",
1596 "BAD_COMMAND",
1597 "HW_ERROR_TUNE_LOCK",
1598 "HW_ERROR_TEMPERATURE",
1599 "ILLEGAL_CHAN_FREQ",
1600 "VCC_NOT_STABLE",
1601 "FH_ERROR",
1602 "NMI_INTERRUPT_HOST",
1603 "NMI_INTERRUPT_ACTION_PT",
1604 "NMI_INTERRUPT_UNKNOWN",
1605 "UCODE_VERSION_MISMATCH",
1606 "HW_ERROR_ABS_LOCK",
1607 "HW_ERROR_CAL_LOCK_FAIL",
1608 "NMI_INTERRUPT_INST_ACTION_PT",
1609 "NMI_INTERRUPT_DATA_ACTION_PT",
1610 "NMI_TRM_HW_ER",
1611 "NMI_INTERRUPT_TRM",
1612	"NMI_INTERRUPT_BREAK_POINT",
1613 "DEBUG_0",
1614 "DEBUG_1",
1615 "DEBUG_2",
1616 "DEBUG_3",
1617};
1618
1619static struct { char *name; u8 num; } advanced_lookup[] = {
1620 { "NMI_INTERRUPT_WDG", 0x34 },
1621 { "SYSASSERT", 0x35 },
1622 { "UCODE_VERSION_MISMATCH", 0x37 },
1623 { "BAD_COMMAND", 0x38 },
1624 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1625 { "FATAL_ERROR", 0x3D },
1626 { "NMI_TRM_HW_ERR", 0x46 },
1627 { "NMI_INTERRUPT_TRM", 0x4C },
1628 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1629 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1630 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1631 { "NMI_INTERRUPT_HOST", 0x66 },
1632 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1633 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1634 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1635 { "ADVANCED_SYSASSERT", 0 },
1636};
1637
1638static const char *iwl4965_desc_lookup(u32 num)
1639{
1640 int i;
1641 int max = ARRAY_SIZE(desc_lookup_text);
1642
1643 if (num < max)
1644 return desc_lookup_text[num];
1645
1646 max = ARRAY_SIZE(advanced_lookup) - 1;
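	/* If no match is found, the loop falls through to the last entry ("ADVANCED_SYSASSERT") */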
1647 for (i = 0; i < max; i++) {
1648 if (advanced_lookup[i].num == num)
1649 break;
1650 }
1651 return advanced_lookup[i].name;
1652}
1653
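/* uCode error log in SRAM: the first word at 'base' holds the number of error entries */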
1654#define ERROR_START_OFFSET (1 * sizeof(u32))
1655#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1656
1657void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
1658{
1659 u32 data2, line;
1660 u32 desc, time, count, base, data1;
1661 u32 blink1, blink2, ilink1, ilink2;
1662 u32 pc, hcmd;
1663
1664 if (priv->ucode_type == UCODE_INIT) {
1665 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1666 } else {
1667 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1668 }
1669
1670 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1671 IWL_ERR(priv,
1672 "Not valid error log pointer 0x%08X for %s uCode\n",
1673 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1674 return;
1675 }
1676
1677 count = iwl_legacy_read_targ_mem(priv, base);
1678
1679 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1680 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1681 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1682 priv->status, count);
1683 }
1684
1685 desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
1686 priv->isr_stats.err_code = desc;
1687 pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
1688 blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
1689 blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
1690 ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
1691 ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
1692 data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
1693 data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
1694 line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
1695 time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
1696 hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
1697
1698 trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
1699 time, data1, data2, line,
1700 blink1, blink2, ilink1, ilink2);
1701
1702 IWL_ERR(priv, "Desc Time "
1703 "data1 data2 line\n");
1704 IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
1705 iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
1706 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
1707 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
1708 pc, blink1, blink2, ilink1, ilink2, hcmd);
1709}
1710
1711#define EVENT_START_OFFSET (4 * sizeof(u32))
1712
1713/**
1714 * iwl4965_print_event_log - Dump error event log to syslog
1715 *
1716 */
1717static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
1718 u32 num_events, u32 mode,
1719 int pos, char **buf, size_t bufsz)
1720{
1721 u32 i;
1722 u32 base; /* SRAM byte address of event log header */
1723 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1724 u32 ptr; /* SRAM byte address of log data */
1725 u32 ev, time, data; /* event log data */
1726 unsigned long reg_flags;
1727
1728 if (num_events == 0)
1729 return pos;
1730
1731 if (priv->ucode_type == UCODE_INIT) {
1732 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1733 } else {
1734 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1735 }
1736
1737 if (mode == 0)
1738 event_size = 2 * sizeof(u32);
1739 else
1740 event_size = 3 * sizeof(u32);
1741
1742 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1743
1744 /* Make sure device is powered up for SRAM reads */
1745 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1746 iwl_grab_nic_access(priv);
1747
1748 /* Set starting address; reads will auto-increment */
1749 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1750 rmb();
1751
1752 /* "time" is actually "data" for mode 0 (no timestamp).
1753 * place event id # at far right for easier visual parsing. */
1754 for (i = 0; i < num_events; i++) {
1755 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1756 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1757 if (mode == 0) {
1758 /* data, ev */
1759 if (bufsz) {
1760 pos += scnprintf(*buf + pos, bufsz - pos,
1761 "EVT_LOG:0x%08x:%04u\n",
1762 time, ev);
1763 } else {
1764 trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
1765 time, ev);
1766 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1767 time, ev);
1768 }
1769 } else {
1770 data = _iwl_legacy_read_direct32(priv,
1771 HBUS_TARG_MEM_RDAT);
1772 if (bufsz) {
1773 pos += scnprintf(*buf + pos, bufsz - pos,
1774 "EVT_LOGT:%010u:0x%08x:%04u\n",
1775 time, data, ev);
1776 } else {
1777 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1778 time, data, ev);
1779 trace_iwlwifi_legacy_dev_ucode_event(priv, time,
1780 data, ev);
1781 }
1782 }
1783 }
1784
1785 /* Allow device to power down */
1786 iwl_release_nic_access(priv);
1787 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1788 return pos;
1789}
1790
1791/**
1792 * iwl4965_print_last_event_logs - Dump the newest event log entries to syslog
1793 */
1794static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1795 u32 num_wraps, u32 next_entry,
1796 u32 size, u32 mode,
1797 int pos, char **buf, size_t bufsz)
1798{
1799 /*
1800	 * display the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries,
1801	 * i.e. the entries just before the next one that uCode would fill.
1802 */
1803 if (num_wraps) {
1804 if (next_entry < size) {
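			/* Newest entries straddle the wrap point: dump the tail of the buffer, then the head */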
1805 pos = iwl4965_print_event_log(priv,
1806 capacity - (size - next_entry),
1807 size - next_entry, mode,
1808 pos, buf, bufsz);
1809 pos = iwl4965_print_event_log(priv, 0,
1810 next_entry, mode,
1811 pos, buf, bufsz);
1812 } else
1813 pos = iwl4965_print_event_log(priv, next_entry - size,
1814 size, mode, pos, buf, bufsz);
1815 } else {
1816 if (next_entry < size) {
1817 pos = iwl4965_print_event_log(priv, 0, next_entry,
1818 mode, pos, buf, bufsz);
1819 } else {
1820 pos = iwl4965_print_event_log(priv, next_entry - size,
1821 size, mode, pos, buf, bufsz);
1822 }
1823 }
1824 return pos;
1825}
1826
1827#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1828
1829int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1830 char **buf, bool display)
1831{
1832 u32 base; /* SRAM byte address of event log header */
1833 u32 capacity; /* event log capacity in # entries */
1834 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1835 u32 num_wraps; /* # times uCode wrapped to top of log */
1836 u32 next_entry; /* index of next entry to be written by uCode */
1837 u32 size; /* # entries that we'll print */
1838 int pos = 0;
1839 size_t bufsz = 0;
1840
1841 if (priv->ucode_type == UCODE_INIT) {
1842 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1843 } else {
1844 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1845 }
1846
1847 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1848 IWL_ERR(priv,
1849 "Invalid event log pointer 0x%08X for %s uCode\n",
1850 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1851 return -EINVAL;
1852 }
1853
1854 /* event log header */
1855 capacity = iwl_legacy_read_targ_mem(priv, base);
1856 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
1857 num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
1858 next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
1859
1860 size = num_wraps ? capacity : next_entry;
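	/* If the log has wrapped, all 'capacity' entries are valid; otherwise only 'next_entry' entries exist */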
1861
1862 /* bail out if nothing in log */
1863 if (size == 0) {
1864 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1865 return pos;
1866 }
1867
1868#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1869 if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1870 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1871 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1872#else
1873 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1874 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1875#endif
1876 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1877 size);
1878
1879#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1880 if (display) {
1881 if (full_log)
1882 bufsz = capacity * 48;
1883 else
1884 bufsz = size * 48;
1885 *buf = kmalloc(bufsz, GFP_KERNEL);
1886 if (!*buf)
1887 return -ENOMEM;
1888 }
1889 if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1890 /*
1891 * if uCode has wrapped back to top of log,
1892 * start at the oldest entry,
1893	 * i.e. the next one that uCode would fill.
1894 */
1895 if (num_wraps)
1896 pos = iwl4965_print_event_log(priv, next_entry,
1897 capacity - next_entry, mode,
1898 pos, buf, bufsz);
1899 /* (then/else) start at top of log */
1900 pos = iwl4965_print_event_log(priv, 0,
1901 next_entry, mode, pos, buf, bufsz);
1902 } else
1903 pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
1904 next_entry, size, mode,
1905 pos, buf, bufsz);
1906#else
1907 pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
1908 next_entry, size, mode,
1909 pos, buf, bufsz);
1910#endif
1911 return pos;
1912}
1913
1914static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1915{
1916 struct iwl_ct_kill_config cmd;
1917 unsigned long flags;
1918 int ret = 0;
1919
1920 spin_lock_irqsave(&priv->lock, flags);
1921 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1922 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1923 spin_unlock_irqrestore(&priv->lock, flags);
1924
1925 cmd.critical_temperature_R =
1926 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1927
1928 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1929 sizeof(cmd), &cmd);
1930 if (ret)
1931 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1932 else
1933 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1934 "succeeded, "
1935 "critical temperature is %d\n",
1936 priv->hw_params.ct_kill_threshold);
1937}
1938
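/* Default mapping of Tx queues 0-6 to FIFOs: the four EDCA ACs, the command FIFO, and two unused queues */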
1939static const s8 default_queue_to_tx_fifo[] = {
1940 IWL_TX_FIFO_VO,
1941 IWL_TX_FIFO_VI,
1942 IWL_TX_FIFO_BE,
1943 IWL_TX_FIFO_BK,
1944 IWL49_CMD_FIFO_NUM,
1945 IWL_TX_FIFO_UNUSED,
1946 IWL_TX_FIFO_UNUSED,
1947};
1948
1949static int iwl4965_alive_notify(struct iwl_priv *priv)
1950{
1951 u32 a;
1952 unsigned long flags;
1953 int i, chan;
1954 u32 reg_val;
1955
1956 spin_lock_irqsave(&priv->lock, flags);
1957
1958	/* Clear 4965's internal Tx scheduler database */
1959 priv->scd_base_addr = iwl_legacy_read_prph(priv,
1960 IWL49_SCD_SRAM_BASE_ADDR);
1961 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
1962 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1963 iwl_legacy_write_targ_mem(priv, a, 0);
1964 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
1965 iwl_legacy_write_targ_mem(priv, a, 0);
1966 for (; a < priv->scd_base_addr +
1967 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
1968 iwl_legacy_write_targ_mem(priv, a, 0);
1969
1970	/* Tell 4965 where to find the Tx byte count tables */
1971 iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
1972 priv->scd_bc_tbls.dma >> 10);
1973
1974 /* Enable DMA channel */
1975 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
1976 iwl_legacy_write_direct32(priv,
1977 FH_TCSR_CHNL_TX_CONFIG_REG(chan),
1978 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1979 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1980
1981 /* Update FH chicken bits */
1982 reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
1983 iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
1984 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1985
1986 /* Disable chain mode for all queues */
1987 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
1988
1989 /* Initialize each Tx queue (including the command queue) */
1990 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
1991
1992 /* TFD circular buffer read/write indexes */
1993 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
1994 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1995
1996 /* Max Tx Window size for Scheduler-ACK mode */
1997 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
1998 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
1999 (SCD_WIN_SIZE <<
2000 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
2001 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
2002
2003 /* Frame limit */
2004 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
2005 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
2006 sizeof(u32),
2007 (SCD_FRAME_LIMIT <<
2008 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2009 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
2010
2011 }
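	/* Enable scheduler interrupts for all configured Tx queues */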
2012 iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
2013 (1 << priv->hw_params.max_txq_num) - 1);
2014
2015 /* Activate all Tx DMA/FIFO channels */
2016 iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
2017
2018 iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
2019
2020	/* make sure all queues are not stopped */
2021 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
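	/* Clear the per-AC queue-stop reference counts */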
2022 for (i = 0; i < 4; i++)
2023 atomic_set(&priv->queue_stop_count[i], 0);
2024
2025	/* reset to 0 to enable all queues first */
2026 priv->txq_ctx_active_msk = 0;
2027 /* Map each Tx/cmd queue to its corresponding fifo */
2028 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
2029
2030 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
2031 int ac = default_queue_to_tx_fifo[i];
2032
2033 iwl_txq_ctx_activate(priv, i);
2034
2035 if (ac == IWL_TX_FIFO_UNUSED)
2036 continue;
2037
2038 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
2039 }
2040
2041 spin_unlock_irqrestore(&priv->lock, flags);
2042
2043 return 0;
2044}
2045
2046/**
2047 * iwl4965_alive_start - called after REPLY_ALIVE notification received
2048 * from protocol/runtime uCode (initialization uCode's
2049 * Alive gets handled by iwl_init_alive_start()).
2050 */
2051static void iwl4965_alive_start(struct iwl_priv *priv)
2052{
2053 int ret = 0;
2054 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2055
2056 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2057
2058 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2059 /* We had an error bringing up the hardware, so take it
2060 * all the way back down so we can try again */
2061 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2062 goto restart;
2063 }
2064
2065	/* The initialize uCode has loaded the runtime uCode ... verify inst image.
2066 * This is a paranoid check, because we would not have gotten the
2067 * "runtime" alive if code weren't properly loaded. */
2068 if (iwl4965_verify_ucode(priv)) {
2069 /* Runtime instruction load was bad;
2070 * take it all the way back down so we can try again */
2071 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2072 goto restart;
2073 }
2074
2075 ret = iwl4965_alive_notify(priv);
2076 if (ret) {
2077 IWL_WARN(priv,
2078 "Could not complete ALIVE transition [ntf]: %d\n", ret);
2079 goto restart;
2080 }
2081
2082
2083 /* After the ALIVE response, we can send host commands to the uCode */
2084 set_bit(STATUS_ALIVE, &priv->status);
2085
2086 /* Enable watchdog to monitor the driver tx queues */
2087 iwl_legacy_setup_watchdog(priv);
2088
2089 if (iwl_legacy_is_rfkill(priv))
2090 return;
2091
2092 ieee80211_wake_queues(priv->hw);
2093
2094 priv->active_rate = IWL_RATES_MASK;
2095
2096 if (iwl_legacy_is_associated_ctx(ctx)) {
2097 struct iwl_legacy_rxon_cmd *active_rxon =
2098 (struct iwl_legacy_rxon_cmd *)&ctx->active;
2099 /* apply any changes in staging */
2100 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2101 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2102 } else {
2103 struct iwl_rxon_context *tmp;
2104 /* Initialize our rx_config data */
2105 for_each_context(priv, tmp)
2106 iwl_legacy_connection_init_rx_config(priv, tmp);
2107
2108 if (priv->cfg->ops->hcmd->set_rxon_chain)
2109 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2110 }
2111
2112 /* Configure bluetooth coexistence if enabled */
2113 iwl_legacy_send_bt_config(priv);
2114
2115 iwl4965_reset_run_time_calib(priv);
2116
2117 set_bit(STATUS_READY, &priv->status);
2118
2119 /* Configure the adapter for unassociated operation */
2120 iwl_legacy_commit_rxon(priv, ctx);
2121
2122 /* At this point, the NIC is initialized and operational */
2123 iwl4965_rf_kill_ct_config(priv);
2124
2125 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2126 wake_up_interruptible(&priv->wait_command_queue);
2127
2128 iwl_legacy_power_update_mode(priv, true);
2129 IWL_DEBUG_INFO(priv, "Updated power mode\n");
2130
2131 return;
2132
2133 restart:
2134 queue_work(priv->workqueue, &priv->restart);
2135}
2136
2137static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
2138
2139static void __iwl4965_down(struct iwl_priv *priv)
2140{
2141 unsigned long flags;
2142 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
2143
2144 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2145
2146 iwl_legacy_scan_cancel_timeout(priv, 200);
2147
2148 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2149
2150	/* Stop the TX queue watchdog. STATUS_EXIT_PENDING must be set
2151	 * so the timer is not rearmed */
2152 del_timer_sync(&priv->watchdog);
2153
2154 iwl_legacy_clear_ucode_stations(priv, NULL);
2155 iwl_legacy_dealloc_bcast_stations(priv);
2156 iwl_legacy_clear_driver_stations(priv);
2157
2158 /* Unblock any waiting calls */
2159 wake_up_interruptible_all(&priv->wait_command_queue);
2160
2161 /* Wipe out the EXIT_PENDING status bit if we are not actually
2162 * exiting the module */
2163 if (!exit_pending)
2164 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2165
2166 /* stop and reset the on-board processor */
2167 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2168
2169 /* tell the device to stop sending interrupts */
2170 spin_lock_irqsave(&priv->lock, flags);
2171 iwl_legacy_disable_interrupts(priv);
2172 spin_unlock_irqrestore(&priv->lock, flags);
2173 iwl4965_synchronize_irq(priv);
2174
2175 if (priv->mac80211_registered)
2176 ieee80211_stop_queues(priv->hw);
2177
2178 /* If we have not previously called iwl_init() then
2179 * clear all bits but the RF Kill bit and return */
2180 if (!iwl_legacy_is_init(priv)) {
2181 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2182 STATUS_RF_KILL_HW |
2183 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2184 STATUS_GEO_CONFIGURED |
2185 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2186 STATUS_EXIT_PENDING;
2187 goto exit;
2188 }
2189
2190 /* ...otherwise clear out all the status bits but the RF Kill
2191 * bit and continue taking the NIC down. */
2192 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2193 STATUS_RF_KILL_HW |
2194 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2195 STATUS_GEO_CONFIGURED |
2196 test_bit(STATUS_FW_ERROR, &priv->status) <<
2197 STATUS_FW_ERROR |
2198 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2199 STATUS_EXIT_PENDING;
2200
2201 iwl4965_txq_ctx_stop(priv);
2202 iwl4965_rxq_stop(priv);
2203
2204 /* Power-down device's busmaster DMA clocks */
2205 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2206 udelay(5);
2207
2208 /* Make sure (redundant) we've released our request to stay awake */
2209 iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
2210 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2211
2212 /* Stop the device, and put it in low power state */
2213 iwl_legacy_apm_stop(priv);
2214
2215 exit:
2216 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2217
2218 dev_kfree_skb(priv->beacon_skb);
2219 priv->beacon_skb = NULL;
2220
2221 /* clear out any free frames */
2222 iwl4965_clear_free_frames(priv);
2223}
2224
2225static void iwl4965_down(struct iwl_priv *priv)
2226{
2227 mutex_lock(&priv->mutex);
2228 __iwl4965_down(priv);
2229 mutex_unlock(&priv->mutex);
2230
2231 iwl4965_cancel_deferred_work(priv);
2232}
2233
2234#define HW_READY_TIMEOUT (50)
2235
2236static int iwl4965_set_hw_ready(struct iwl_priv *priv)
2237{
2238 int ret = 0;
2239
2240 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2241 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2242
2243 /* See if we got it */
2244 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2245 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2246 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2247 HW_READY_TIMEOUT);
2248 if (ret != -ETIMEDOUT)
2249 priv->hw_ready = true;
2250 else
2251 priv->hw_ready = false;
2252
2253	IWL_DEBUG_INFO(priv, "hardware %s\n",
2254			priv->hw_ready ? "ready" : "not ready");
2255 return ret;
2256}
2257
2258static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
2259{
2260 int ret = 0;
2261
2262 IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
2263
2264 ret = iwl4965_set_hw_ready(priv);
2265 if (priv->hw_ready)
2266 return ret;
2267
2268 /* If HW is not ready, prepare the conditions to check again */
2269 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2270 CSR_HW_IF_CONFIG_REG_PREPARE);
2271
2272 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2273 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
2274 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
2275
2276 /* HW should be ready by now, check again. */
2277 if (ret != -ETIMEDOUT)
2278 iwl4965_set_hw_ready(priv);
2279
2280 return ret;
2281}
2282
2283#define MAX_HW_RESTARTS 5
2284
2285static int __iwl4965_up(struct iwl_priv *priv)
2286{
2287 struct iwl_rxon_context *ctx;
2288 int i;
2289 int ret;
2290
2291 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2292 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2293 return -EIO;
2294 }
2295
2296 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2297 IWL_ERR(priv, "ucode not available for device bringup\n");
2298 return -EIO;
2299 }
2300
2301 for_each_context(priv, ctx) {
2302 ret = iwl4965_alloc_bcast_station(priv, ctx);
2303 if (ret) {
2304 iwl_legacy_dealloc_bcast_stations(priv);
2305 return ret;
2306 }
2307 }
2308
2309 iwl4965_prepare_card_hw(priv);
2310
2311 if (!priv->hw_ready) {
2312 IWL_WARN(priv, "Exit HW not ready\n");
2313 return -EIO;
2314 }
2315
2316 /* If platform's RF_KILL switch is NOT set to KILL */
2317 if (iwl_read32(priv,
2318 CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2319 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2320 else
2321 set_bit(STATUS_RF_KILL_HW, &priv->status);
2322
2323 if (iwl_legacy_is_rfkill(priv)) {
2324 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
2325
2326 iwl_legacy_enable_interrupts(priv);
2327 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2328 return 0;
2329 }
2330
2331 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2332
2333	/* must be initialized before iwl4965_hw_nic_init */
2334 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
2335
2336 ret = iwl4965_hw_nic_init(priv);
2337 if (ret) {
2338 IWL_ERR(priv, "Unable to init nic\n");
2339 return ret;
2340 }
2341
2342 /* make sure rfkill handshake bits are cleared */
2343 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2344 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2345 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2346
2347 /* clear (again), then enable host interrupts */
2348 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2349 iwl_legacy_enable_interrupts(priv);
2350
2351 /* really make sure rfkill handshake bits are cleared */
2352 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2353 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2354
2355 /* Copy original ucode data image from disk into backup cache.
2356 * This will be used to initialize the on-board processor's
2357 * data SRAM for a clean start when the runtime program first loads. */
2358 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2359 priv->ucode_data.len);
2360
2361 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2362
2363 /* load bootstrap state machine,
2364 * load bootstrap program into processor's memory,
2365 * prepare to load the "initialize" uCode */
2366 ret = priv->cfg->ops->lib->load_ucode(priv);
2367
2368 if (ret) {
2369 IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
2370 ret);
2371 continue;
2372 }
2373
2374 /* start card; "initialize" will load runtime ucode */
2375 iwl4965_nic_start(priv);
2376
2377 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2378
2379 return 0;
2380 }
2381
2382 set_bit(STATUS_EXIT_PENDING, &priv->status);
2383 __iwl4965_down(priv);
2384 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2385
2386	/* tried to restart and configure the device for as long as our
2387	 * patience could withstand */
2388 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2389 return -EIO;
2390}
2391
2392
2393/*****************************************************************************
2394 *
2395 * Workqueue callbacks
2396 *
2397 *****************************************************************************/
2398
2399static void iwl4965_bg_init_alive_start(struct work_struct *data)
2400{
2401 struct iwl_priv *priv =
2402 container_of(data, struct iwl_priv, init_alive_start.work);
2403
2404 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2405 return;
2406
2407 mutex_lock(&priv->mutex);
2408 priv->cfg->ops->lib->init_alive_start(priv);
2409 mutex_unlock(&priv->mutex);
2410}
2411
2412static void iwl4965_bg_alive_start(struct work_struct *data)
2413{
2414 struct iwl_priv *priv =
2415 container_of(data, struct iwl_priv, alive_start.work);
2416
2417 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2418 return;
2419
2420 mutex_lock(&priv->mutex);
2421 iwl4965_alive_start(priv);
2422 mutex_unlock(&priv->mutex);
2423}
2424
2425static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
2426{
2427 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2428 run_time_calib_work);
2429
2430 mutex_lock(&priv->mutex);
2431
2432 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2433 test_bit(STATUS_SCANNING, &priv->status)) {
2434 mutex_unlock(&priv->mutex);
2435 return;
2436 }
2437
2438 if (priv->start_calib) {
2439 iwl4965_chain_noise_calibration(priv,
2440 (void *)&priv->_4965.statistics);
2441 iwl4965_sensitivity_calibration(priv,
2442 (void *)&priv->_4965.statistics);
2443 }
2444
2445 mutex_unlock(&priv->mutex);
2446}
2447
2448static void iwl4965_bg_restart(struct work_struct *data)
2449{
2450 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2451
2452 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2453 return;
2454
2455 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2456 struct iwl_rxon_context *ctx;
2457
2458 mutex_lock(&priv->mutex);
2459 for_each_context(priv, ctx)
2460 ctx->vif = NULL;
2461 priv->is_open = 0;
2462
2463 __iwl4965_down(priv);
2464
2465 mutex_unlock(&priv->mutex);
2466 iwl4965_cancel_deferred_work(priv);
2467 ieee80211_restart_hw(priv->hw);
2468 } else {
2469 iwl4965_down(priv);
2470
2471 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2472 return;
2473
2474 mutex_lock(&priv->mutex);
2475 __iwl4965_up(priv);
2476 mutex_unlock(&priv->mutex);
2477 }
2478}
2479
2480static void iwl4965_bg_rx_replenish(struct work_struct *data)
2481{
2482 struct iwl_priv *priv =
2483 container_of(data, struct iwl_priv, rx_replenish);
2484
2485 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2486 return;
2487
2488 mutex_lock(&priv->mutex);
2489 iwl4965_rx_replenish(priv);
2490 mutex_unlock(&priv->mutex);
2491}
2492
2493/*****************************************************************************
2494 *
2495 * mac80211 entry point functions
2496 *
2497 *****************************************************************************/
2498
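/* How long (in jiffies) to wait for the runtime uCode to report READY */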
2499#define UCODE_READY_TIMEOUT (4 * HZ)
2500
2501/*
2502 * Not a mac80211 entry point function, but it fits in with all the
2503 * other mac80211 functions grouped here.
2504 */
2505static int iwl4965_mac_setup_register(struct iwl_priv *priv,
2506 u32 max_probe_length)
2507{
2508 int ret;
2509 struct ieee80211_hw *hw = priv->hw;
2510 struct iwl_rxon_context *ctx;
2511
2512 hw->rate_control_algorithm = "iwl-4965-rs";
2513
2514 /* Tell mac80211 our characteristics */
2515 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2516 IEEE80211_HW_AMPDU_AGGREGATION |
2517 IEEE80211_HW_NEED_DTIM_PERIOD |
2518 IEEE80211_HW_SPECTRUM_MGMT |
2519 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2520
2521 if (priv->cfg->sku & IWL_SKU_N)
2522 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2523 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2524
2525 hw->sta_data_size = sizeof(struct iwl_station_priv);
2526 hw->vif_data_size = sizeof(struct iwl_vif_priv);
2527
2528 for_each_context(priv, ctx) {
2529 hw->wiphy->interface_modes |= ctx->interface_modes;
2530 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
2531 }
2532
2533 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
2534 WIPHY_FLAG_DISABLE_BEACON_HINTS;
2535
2536 /*
2537 * For now, disable PS by default because it affects
2538 * RX performance significantly.
2539 */
2540 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2541
2542 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2543 /* we create the 802.11 header and a zero-length SSID element */
2544 hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
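	/* 24 = 802.11 management header, 2 = empty SSID element (ID + length) */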
2545
2546 /* Default value; 4 EDCA QOS priorities */
2547 hw->queues = 4;
2548
2549 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2550
2551 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2552 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2553 &priv->bands[IEEE80211_BAND_2GHZ];
2554 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2555 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2556 &priv->bands[IEEE80211_BAND_5GHZ];
2557
2558 iwl_legacy_leds_init(priv);
2559
2560 ret = ieee80211_register_hw(priv->hw);
2561 if (ret) {
2562 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2563 return ret;
2564 }
2565 priv->mac80211_registered = 1;
2566
2567 return 0;
2568}
2569
2570
2571int iwl4965_mac_start(struct ieee80211_hw *hw)
2572{
2573 struct iwl_priv *priv = hw->priv;
2574 int ret;
2575
2576 IWL_DEBUG_MAC80211(priv, "enter\n");
2577
2578 /* we should be verifying the device is ready to be opened */
2579 mutex_lock(&priv->mutex);
2580 ret = __iwl4965_up(priv);
2581 mutex_unlock(&priv->mutex);
2582
2583 if (ret)
2584 return ret;
2585
2586 if (iwl_legacy_is_rfkill(priv))
2587 goto out;
2588
2589 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2590
2591 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
2592 * mac80211 will not be run successfully. */
2593 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
2594 test_bit(STATUS_READY, &priv->status),
2595 UCODE_READY_TIMEOUT);
2596 if (!ret) {
2597 if (!test_bit(STATUS_READY, &priv->status)) {
2598 IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
2599 jiffies_to_msecs(UCODE_READY_TIMEOUT));
2600 return -ETIMEDOUT;
2601 }
2602 }
2603
2604 iwl4965_led_enable(priv);
2605
2606out:
2607 priv->is_open = 1;
2608 IWL_DEBUG_MAC80211(priv, "leave\n");
2609 return 0;
2610}
2611
2612void iwl4965_mac_stop(struct ieee80211_hw *hw)
2613{
2614 struct iwl_priv *priv = hw->priv;
2615
2616 IWL_DEBUG_MAC80211(priv, "enter\n");
2617
2618 if (!priv->is_open)
2619 return;
2620
2621 priv->is_open = 0;
2622
2623 iwl4965_down(priv);
2624
2625 flush_workqueue(priv->workqueue);
2626
2627 /* enable interrupts again in order to receive rfkill changes */
2628 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2629 iwl_legacy_enable_interrupts(priv);
2630
2631 IWL_DEBUG_MAC80211(priv, "leave\n");
2632}
2633
2634int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2635{
2636 struct iwl_priv *priv = hw->priv;
2637
2638 IWL_DEBUG_MACDUMP(priv, "enter\n");
2639
2640 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2641 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2642
2643 if (iwl4965_tx_skb(priv, skb))
2644 dev_kfree_skb_any(skb);
2645
2646 IWL_DEBUG_MACDUMP(priv, "leave\n");
2647 return NETDEV_TX_OK;
2648}
2649
2650void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
2651 struct ieee80211_vif *vif,
2652 struct ieee80211_key_conf *keyconf,
2653 struct ieee80211_sta *sta,
2654 u32 iv32, u16 *phase1key)
2655{
2656 struct iwl_priv *priv = hw->priv;
2657 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2658
2659 IWL_DEBUG_MAC80211(priv, "enter\n");
2660
2661 iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
2662 iv32, phase1key);
2663
2664 IWL_DEBUG_MAC80211(priv, "leave\n");
2665}
2666
2667int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2668 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2669 struct ieee80211_key_conf *key)
2670{
2671 struct iwl_priv *priv = hw->priv;
2672 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2673 struct iwl_rxon_context *ctx = vif_priv->ctx;
2674 int ret;
2675 u8 sta_id;
2676 bool is_default_wep_key = false;
2677
2678 IWL_DEBUG_MAC80211(priv, "enter\n");
2679
2680 if (priv->cfg->mod_params->sw_crypto) {
2681 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2682 return -EOPNOTSUPP;
2683 }
2684
2685 sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
2686 if (sta_id == IWL_INVALID_STATION)
2687 return -EINVAL;
2688
2689 mutex_lock(&priv->mutex);
2690 iwl_legacy_scan_cancel_timeout(priv, 100);
2691
2692 /*
2693	 * If we are adding a WEP group key and we have not received any
2694	 * key-mapping key so far, we are in legacy WEP mode (group key only);
2695	 * otherwise we are in 802.1X mode.
2696	 * In legacy WEP mode a different host command is sent to the uCode.
2697 */
2698 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2699 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2700 !sta) {
2701 if (cmd == SET_KEY)
2702 is_default_wep_key = !ctx->key_mapping_keys;
2703 else
2704 is_default_wep_key =
2705 (key->hw_key_idx == HW_KEY_DEFAULT);
2706 }
2707
2708 switch (cmd) {
2709 case SET_KEY:
2710 if (is_default_wep_key)
2711 ret = iwl4965_set_default_wep_key(priv,
2712 vif_priv->ctx, key);
2713 else
2714 ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
2715 key, sta_id);
2716
2717 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2718 break;
2719 case DISABLE_KEY:
2720 if (is_default_wep_key)
2721 ret = iwl4965_remove_default_wep_key(priv, ctx, key);
2722 else
2723 ret = iwl4965_remove_dynamic_key(priv, ctx,
2724 key, sta_id);
2725
2726 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2727 break;
2728 default:
2729 ret = -EINVAL;
2730 }
2731
2732 mutex_unlock(&priv->mutex);
2733 IWL_DEBUG_MAC80211(priv, "leave\n");
2734
2735 return ret;
2736}
2737
2738int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
2739 struct ieee80211_vif *vif,
2740 enum ieee80211_ampdu_mlme_action action,
2741 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2742 u8 buf_size)
2743{
2744 struct iwl_priv *priv = hw->priv;
2745 int ret = -EINVAL;
2746
2747 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2748 sta->addr, tid);
2749
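	/* A-MPDU aggregation requires an 802.11n (HT-capable) SKU */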
2750 if (!(priv->cfg->sku & IWL_SKU_N))
2751 return -EACCES;
2752
2753 mutex_lock(&priv->mutex);
2754
2755 switch (action) {
2756 case IEEE80211_AMPDU_RX_START:
2757 IWL_DEBUG_HT(priv, "start Rx\n");
2758 ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
2759 break;
2760 case IEEE80211_AMPDU_RX_STOP:
2761 IWL_DEBUG_HT(priv, "stop Rx\n");
2762 ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
2763 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2764 ret = 0;
2765 break;
2766 case IEEE80211_AMPDU_TX_START:
2767 IWL_DEBUG_HT(priv, "start Tx\n");
2768 ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
2769 if (ret == 0) {
2770 priv->_4965.agg_tids_count++;
2771 IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
2772 priv->_4965.agg_tids_count);
2773 }
2774 break;
2775 case IEEE80211_AMPDU_TX_STOP:
2776 IWL_DEBUG_HT(priv, "stop Tx\n");
2777 ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
2778 if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
2779 priv->_4965.agg_tids_count--;
2780 IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
2781 priv->_4965.agg_tids_count);
2782 }
2783 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2784 ret = 0;
2785 break;
2786 case IEEE80211_AMPDU_TX_OPERATIONAL:
2787 ret = 0;
2788 break;
2789 }
2790 mutex_unlock(&priv->mutex);
2791
2792 return ret;
2793}
2794
2795int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
2796 struct ieee80211_vif *vif,
2797 struct ieee80211_sta *sta)
2798{
2799 struct iwl_priv *priv = hw->priv;
2800 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2801 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
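	/* When our interface is a station, the peer being added is the AP */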
2802 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
2803 int ret;
2804 u8 sta_id;
2805
2806 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
2807 sta->addr);
2808 mutex_lock(&priv->mutex);
2809 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
2810 sta->addr);
2811 sta_priv->common.sta_id = IWL_INVALID_STATION;
2812
2813 atomic_set(&sta_priv->pending_frames, 0);
2814
2815 ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
2816 is_ap, sta, &sta_id);
2817 if (ret) {
2818 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
2819 sta->addr, ret);
2820 /* Should we return success if return code is EEXIST ? */
2821 mutex_unlock(&priv->mutex);
2822 return ret;
2823 }
2824
2825 sta_priv->common.sta_id = sta_id;
2826
2827 /* Initialize rate scaling */
2828 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
2829 sta->addr);
2830 iwl4965_rs_rate_init(priv, sta, sta_id);
2831 mutex_unlock(&priv->mutex);
2832
2833 return 0;
2834}
2835
2836void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
2837 struct ieee80211_channel_switch *ch_switch)
2838{
2839 struct iwl_priv *priv = hw->priv;
2840 const struct iwl_channel_info *ch_info;
2841 struct ieee80211_conf *conf = &hw->conf;
2842 struct ieee80211_channel *channel = ch_switch->channel;
2843 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2844
2845 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2846 u16 ch;
2847 unsigned long flags = 0;
2848
2849 IWL_DEBUG_MAC80211(priv, "enter\n");
2850
2851 if (iwl_legacy_is_rfkill(priv))
2852 goto out_exit;
2853
2854 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2855 test_bit(STATUS_SCANNING, &priv->status))
2856 goto out_exit;
2857
2858 if (!iwl_legacy_is_associated_ctx(ctx))
2859 goto out_exit;
2860
2861 /* channel switch in progress */
2862	if (priv->switch_rxon.switch_in_progress)
2863 goto out_exit;
2864
2865 mutex_lock(&priv->mutex);
2866 if (priv->cfg->ops->lib->set_channel_switch) {
2867
2868 ch = channel->hw_value;
2869 if (le16_to_cpu(ctx->active.channel) != ch) {
2870 ch_info = iwl_legacy_get_channel_info(priv,
2871 channel->band,
2872 ch);
2873 if (!iwl_legacy_is_channel_valid(ch_info)) {
2874 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
2875 goto out;
2876 }
2877 spin_lock_irqsave(&priv->lock, flags);
2878
2879 priv->current_ht_config.smps = conf->smps_mode;
2880
2881 /* Configure HT40 channels */
2882 ctx->ht.enabled = conf_is_ht(conf);
2883 if (ctx->ht.enabled) {
2884 if (conf_is_ht40_minus(conf)) {
2885 ctx->ht.extension_chan_offset =
2886 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2887 ctx->ht.is_40mhz = true;
2888 } else if (conf_is_ht40_plus(conf)) {
2889 ctx->ht.extension_chan_offset =
2890 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2891 ctx->ht.is_40mhz = true;
2892 } else {
2893 ctx->ht.extension_chan_offset =
2894 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2895 ctx->ht.is_40mhz = false;
2896 }
2897 } else
2898 ctx->ht.is_40mhz = false;
2899
2900 if ((le16_to_cpu(ctx->staging.channel) != ch))
2901 ctx->staging.flags = 0;
2902
2903 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2904 iwl_legacy_set_rxon_ht(priv, ht_conf);
2905 iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
2906 ctx->vif);
2907 spin_unlock_irqrestore(&priv->lock, flags);
2908
2909 iwl_legacy_set_rate(priv);
2910 /*
2911 * at this point, staging_rxon has the
2912 * configuration for channel switch
2913 */
2914 if (priv->cfg->ops->lib->set_channel_switch(priv,
2915 ch_switch))
2916 priv->switch_rxon.switch_in_progress = false;
2917 }
2918 }
2919out:
2920 mutex_unlock(&priv->mutex);
2921out_exit:
2922 if (!priv->switch_rxon.switch_in_progress)
2923 ieee80211_chswitch_done(ctx->vif, false);
2924 IWL_DEBUG_MAC80211(priv, "leave\n");
2925}
2926
2927void iwl4965_configure_filter(struct ieee80211_hw *hw,
2928 unsigned int changed_flags,
2929 unsigned int *total_flags,
2930 u64 multicast)
2931{
2932 struct iwl_priv *priv = hw->priv;
2933 __le32 filter_or = 0, filter_nand = 0;
2934 struct iwl_rxon_context *ctx;
2935
2936#define CHK(test, flag) do { \
2937 if (*total_flags & (test)) \
2938 filter_or |= (flag); \
2939 else \
2940 filter_nand |= (flag); \
2941 } while (0)
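/* CHK() records whether each mac80211 filter flag maps to a RXON filter bit to set (filter_or) or clear (filter_nand) */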
2942
2943 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
2944 changed_flags, *total_flags);
2945
2946 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
2947 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
2948 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
2949 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
2950
2951#undef CHK
2952
2953 mutex_lock(&priv->mutex);
2954
2955 for_each_context(priv, ctx) {
2956 ctx->staging.filter_flags &= ~filter_nand;
2957 ctx->staging.filter_flags |= filter_or;
2958
2959 /*
2960 * Not committing directly because hardware can perform a scan,
2961 * but we'll eventually commit the filter flags change anyway.
2962 */
2963 }
2964
2965 mutex_unlock(&priv->mutex);
2966
2967 /*
2968 * Receiving all multicast frames is always enabled by the
2969 * default flags setup in iwl_legacy_connection_init_rx_config()
2970 * since we currently do not support programming multicast
2971 * filters into the device.
2972 */
2973 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
2974 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
2975}
2976
2977/*****************************************************************************
2978 *
2979 * driver setup and teardown
2980 *
2981 *****************************************************************************/
2982
2983static void iwl4965_bg_txpower_work(struct work_struct *work)
2984{
2985 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2986 txpower_work);
2987
2988 /* If a scan happened to start before we got here
2989 * then just return; the statistics notification will
2990 * kick off another scheduled work to compensate for
2991 * any temperature delta we missed here. */
2992 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2993 test_bit(STATUS_SCANNING, &priv->status))
2994 return;
2995
2996 mutex_lock(&priv->mutex);
2997
2998	/* Regardless of whether we are associated, we must reconfigure the
2999 * TX power since frames can be sent on non-radar channels while
3000 * not associated */
3001 priv->cfg->ops->lib->send_tx_power(priv);
3002
3003 /* Update last_temperature to keep is_calib_needed from running
3004 * when it isn't needed... */
3005 priv->last_temperature = priv->temperature;
3006
3007 mutex_unlock(&priv->mutex);
3008}
3009
3010static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
3011{
3012 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3013
3014 init_waitqueue_head(&priv->wait_command_queue);
3015
3016 INIT_WORK(&priv->restart, iwl4965_bg_restart);
3017 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
3018 INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
3019 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
3020 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
3021
3022 iwl_legacy_setup_scan_deferred_work(priv);
3023
3024 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
3025
3026 init_timer(&priv->statistics_periodic);
3027 priv->statistics_periodic.data = (unsigned long)priv;
3028 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
3029
3030 init_timer(&priv->ucode_trace);
3031 priv->ucode_trace.data = (unsigned long)priv;
3032 priv->ucode_trace.function = iwl4965_bg_ucode_trace;
3033
3034 init_timer(&priv->watchdog);
3035 priv->watchdog.data = (unsigned long)priv;
3036 priv->watchdog.function = iwl_legacy_bg_watchdog;
3037
3038 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3039 iwl4965_irq_tasklet, (unsigned long)priv);
3040}
3041
3042static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
3043{
3044 cancel_work_sync(&priv->txpower_work);
3045 cancel_delayed_work_sync(&priv->init_alive_start);
3046 cancel_delayed_work(&priv->alive_start);
3047 cancel_work_sync(&priv->run_time_calib_work);
3048
3049 iwl_legacy_cancel_scan_deferred_work(priv);
3050
3051 del_timer_sync(&priv->statistics_periodic);
3052 del_timer_sync(&priv->ucode_trace);
3053}
3054
3055static void iwl4965_init_hw_rates(struct iwl_priv *priv,
3056 struct ieee80211_rate *rates)
3057{
3058 int i;
3059
3060 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
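		/* iwl_rates[].ieee is in 500 kb/s units; mac80211 bitrates are in 100 kb/s units */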
3061 rates[i].bitrate = iwl_rates[i].ieee * 5;
3062 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3063 rates[i].hw_value_short = i;
3064 rates[i].flags = 0;
3065 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
3066 /*
3067 * If CCK != 1M then set short preamble rate flag.
3068 */
3069 rates[i].flags |=
3070 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
3071 0 : IEEE80211_RATE_SHORT_PREAMBLE;
3072 }
3073 }
3074}
3075/*
3076 * Acquire priv->lock before calling this function !
3077 */
3078void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
3079{
3080 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
3081 (index & 0xff) | (txq_id << 8));
3082 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
3083}
3084
3085void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
3086 struct iwl_tx_queue *txq,
3087 int tx_fifo_id, int scd_retry)
3088{
3089 int txq_id = txq->q.id;
3090
3091 /* Find out whether to activate Tx queue */
3092 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
3093
3094 /* Set up and activate */
3095 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
3096 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
3097 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
3098 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
3099 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
3100 IWL49_SCD_QUEUE_STTS_REG_MSK);
3101
3102 txq->sched_retry = scd_retry;
3103
3104 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
3105 active ? "Activate" : "Deactivate",
3106 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
3107}
3108
3109
3110static int iwl4965_init_drv(struct iwl_priv *priv)
3111{
3112 int ret;
3113
3114 spin_lock_init(&priv->sta_lock);
3115 spin_lock_init(&priv->hcmd_lock);
3116
3117 INIT_LIST_HEAD(&priv->free_frames);
3118
3119 mutex_init(&priv->mutex);
3120 mutex_init(&priv->sync_cmd_mutex);
3121
3122 priv->ieee_channels = NULL;
3123 priv->ieee_rates = NULL;
3124 priv->band = IEEE80211_BAND_2GHZ;
3125
3126 priv->iw_mode = NL80211_IFTYPE_STATION;
3127 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3128 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3129 priv->_4965.agg_tids_count = 0;
3130
3131 /* initialize force reset */
3132 priv->force_reset[IWL_RF_RESET].reset_duration =
3133 IWL_DELAY_NEXT_FORCE_RF_RESET;
3134 priv->force_reset[IWL_FW_RESET].reset_duration =
3135 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3136
3137 /* Choose which receivers/antennas to use */
3138 if (priv->cfg->ops->hcmd->set_rxon_chain)
3139 priv->cfg->ops->hcmd->set_rxon_chain(priv,
3140 &priv->contexts[IWL_RXON_CTX_BSS]);
3141
3142 iwl_legacy_init_scan_params(priv);
3143
3144	/* Set tx_power_user_lmt to the lowest power level;
3145	 * this value will be overwritten by the per-channel max power
3146	 * average from EEPROM */
3147 priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
3148 priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
3149
3150 ret = iwl_legacy_init_channel_map(priv);
3151 if (ret) {
3152 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3153 goto err;
3154 }
3155
3156 ret = iwl_legacy_init_geos(priv);
3157 if (ret) {
3158 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3159 goto err_free_channel_map;
3160 }
3161 iwl4965_init_hw_rates(priv, priv->ieee_rates);
3162
3163 return 0;
3164
3165err_free_channel_map:
3166 iwl_legacy_free_channel_map(priv);
3167err:
3168 return ret;
3169}
3170
3171static void iwl4965_uninit_drv(struct iwl_priv *priv)
3172{
3173 iwl4965_calib_free_results(priv);
3174 iwl_legacy_free_geos(priv);
3175 iwl_legacy_free_channel_map(priv);
3176 kfree(priv->scan_cmd);
3177}
3178
3179static void iwl4965_hw_detect(struct iwl_priv *priv)
3180{
3181 priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
3182 priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
3183 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
3184 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
3185}
3186
3187static int iwl4965_set_hw_params(struct iwl_priv *priv)
3188{
3189 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
3190 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
3191 if (priv->cfg->mod_params->amsdu_size_8K)
3192 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
3193 else
3194 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
3195
3196 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
3197
3198 if (priv->cfg->mod_params->disable_11n)
3199 priv->cfg->sku &= ~IWL_SKU_N;
3200
3201 /* Device-specific setup */
3202 return priv->cfg->ops->lib->set_hw_params(priv);
3203}
3204
3205static const u8 iwl4965_bss_ac_to_fifo[] = {
3206 IWL_TX_FIFO_VO,
3207 IWL_TX_FIFO_VI,
3208 IWL_TX_FIFO_BE,
3209 IWL_TX_FIFO_BK,
3210};
3211
3212static const u8 iwl4965_bss_ac_to_queue[] = {
3213 0, 1, 2, 3,
3214};
3215
3216static int
3217iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3218{
3219 int err = 0, i;
3220 struct iwl_priv *priv;
3221 struct ieee80211_hw *hw;
3222 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3223 unsigned long flags;
3224 u16 pci_cmd;
3225
3226 /************************
3227 * 1. Allocating HW data
3228 ************************/
3229
3230 hw = iwl_legacy_alloc_all(cfg);
3231 if (!hw) {
3232 err = -ENOMEM;
3233 goto out;
3234 }
3235 priv = hw->priv;
3236 /* At this point both hw and priv are allocated. */
3237
3238 /*
3239 * The default context is always valid,
3240 * more may be discovered when firmware
3241 * is loaded.
3242 */
3243 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3244
3245 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3246 priv->contexts[i].ctxid = i;
3247
3248 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
3249 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
3250 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3251 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3252 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3253 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3254 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3255 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3256 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
3257 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
3258 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
3259 BIT(NL80211_IFTYPE_ADHOC);
3260 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3261 BIT(NL80211_IFTYPE_STATION);
3262 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
3263 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3264 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3265 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3266
3267 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
3268
3269 SET_IEEE80211_DEV(hw, &pdev->dev);
3270
3271 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3272 priv->cfg = cfg;
3273 priv->pci_dev = pdev;
3274 priv->inta_mask = CSR_INI_SET_MASK;
3275
3276 if (iwl_legacy_alloc_traffic_mem(priv))
3277 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3278
3279 /**************************
3280 * 2. Initializing PCI bus
3281 **************************/
3282 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3283 PCIE_LINK_STATE_CLKPM);
3284
3285 if (pci_enable_device(pdev)) {
3286 err = -ENODEV;
3287 goto out_ieee80211_free_hw;
3288 }
3289
3290 pci_set_master(pdev);
3291
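	/* Prefer 36-bit DMA addressing; fall back to 32-bit if that fails */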
3292 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
3293 if (!err)
3294 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
3295 if (err) {
3296 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3297 if (!err)
3298 err = pci_set_consistent_dma_mask(pdev,
3299 DMA_BIT_MASK(32));
3300 /* both attempts failed: */
3301 if (err) {
3302 IWL_WARN(priv, "No suitable DMA available.\n");
3303 goto out_pci_disable_device;
3304 }
3305 }
3306
3307 err = pci_request_regions(pdev, DRV_NAME);
3308 if (err)
3309 goto out_pci_disable_device;
3310
3311 pci_set_drvdata(pdev, priv);
3312
3313
3314 /***********************
3315 * 3. Read REV register
3316 ***********************/
3317 priv->hw_base = pci_iomap(pdev, 0, 0);
3318 if (!priv->hw_base) {
3319 err = -ENODEV;
3320 goto out_pci_release_regions;
3321 }
3322
3323 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
3324 (unsigned long long) pci_resource_len(pdev, 0));
3325 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3326
3327	/* these spin locks are used in apm_ops.init and EEPROM access,
3328	 * so initialize them now
3329 */
3330 spin_lock_init(&priv->reg_lock);
3331 spin_lock_init(&priv->lock);
3332
3333 /*
3334 * stop and reset the on-board processor just in case it is in a
3335 * strange state ... like being left stranded by a primary kernel
3336 * and this is now the kdump kernel trying to start up
3337 */
3338 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3339
3340 iwl4965_hw_detect(priv);
3341 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3342 priv->cfg->name, priv->hw_rev);
3343
3344 /* We disable the RETRY_TIMEOUT register (0x41) to keep
3345 * PCI Tx retries from interfering with C3 CPU state */
3346 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3347
3348 iwl4965_prepare_card_hw(priv);
3349 if (!priv->hw_ready) {
3350 IWL_WARN(priv, "Failed, HW not ready\n");
3351 goto out_iounmap;
3352 }
3353
3354 /*****************
3355 * 4. Read EEPROM
3356 *****************/
3357 /* Read the EEPROM */
3358 err = iwl_legacy_eeprom_init(priv);
3359 if (err) {
3360 IWL_ERR(priv, "Unable to init EEPROM\n");
3361 goto out_iounmap;
3362 }
3363 err = iwl4965_eeprom_check_version(priv);
3364 if (err)
3365 goto out_free_eeprom;
3369
3370 /* extract MAC Address */
3371 iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
3372 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3373 priv->hw->wiphy->addresses = priv->addresses;
3374 priv->hw->wiphy->n_addresses = 1;
3375
3376 /************************
3377 * 5. Setup HW constants
3378 ************************/
3379 if (iwl4965_set_hw_params(priv)) {
3380 IWL_ERR(priv, "failed to set hw parameters\n");
3381 goto out_free_eeprom;
3382 }
3383
3384 /*******************
3385 * 6. Setup priv
3386 *******************/
3387
3388 err = iwl4965_init_drv(priv);
3389 if (err)
3390 goto out_free_eeprom;
3391 /* At this point both hw and priv are initialized. */
3392
3393 /********************
3394 * 7. Setup services
3395 ********************/
3396 spin_lock_irqsave(&priv->lock, flags);
3397 iwl_legacy_disable_interrupts(priv);
3398 spin_unlock_irqrestore(&priv->lock, flags);
3399
3400 pci_enable_msi(priv->pci_dev);
3401
3402 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3403 IRQF_SHARED, DRV_NAME, priv);
3404 if (err) {
3405 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3406 goto out_disable_msi;
3407 }
3408
3409 iwl4965_setup_deferred_work(priv);
3410 iwl4965_setup_rx_handlers(priv);
3411
3412 /*********************************************
3413 * 8. Enable interrupts and read RFKILL state
3414 *********************************************/
3415
3416 /* enable interrupts if needed: hw bug w/a */
3417 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3418 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3419 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3420 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3421 }
3422
3423 iwl_legacy_enable_interrupts(priv);
3424
3425 /* If platform's RF_KILL switch is NOT set to KILL */
3426 if (iwl_read32(priv, CSR_GP_CNTRL) &
3427 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3428 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3429 else
3430 set_bit(STATUS_RF_KILL_HW, &priv->status);
3431
3432 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3433 test_bit(STATUS_RF_KILL_HW, &priv->status));
3434
3435 iwl_legacy_power_initialize(priv);
3436
3437 init_completion(&priv->_4965.firmware_loading_complete);
3438
3439 err = iwl4965_request_firmware(priv, true);
3440 if (err)
3441 goto out_destroy_workqueue;
3442
3443 return 0;
3444
3445 out_destroy_workqueue:
3446 destroy_workqueue(priv->workqueue);
3447 priv->workqueue = NULL;
3448 free_irq(priv->pci_dev->irq, priv);
3449 out_disable_msi:
3450 pci_disable_msi(priv->pci_dev);
3451 iwl4965_uninit_drv(priv);
3452 out_free_eeprom:
3453 iwl_legacy_eeprom_free(priv);
3454 out_iounmap:
3455 pci_iounmap(pdev, priv->hw_base);
3456 out_pci_release_regions:
3457 pci_set_drvdata(pdev, NULL);
3458 pci_release_regions(pdev);
3459 out_pci_disable_device:
3460 pci_disable_device(pdev);
3461 out_ieee80211_free_hw:
3462 iwl_legacy_free_traffic_mem(priv);
3463 ieee80211_free_hw(priv->hw);
3464 out:
3465 return err;
3466}
3467
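/*
 * Editorial sketch, not part of this file: the probe path above unwinds
 * failures with the usual goto ladder -- each step that can fail jumps to
 * a label that releases only what has already been set up, in reverse
 * order.  A minimal stand-alone illustration; example_setup() stands in
 * for the device-specific work and is purely hypothetical.
 */
static int example_setup(struct pci_dev *pdev)
{
	return 0;	/* placeholder for the real device setup */
}

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		goto out;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto out_disable_device;

	err = example_setup(pdev);
	if (err)
		goto out_release_regions;

	return 0;

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return err;
}
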
3468static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
3469{
3470 struct iwl_priv *priv = pci_get_drvdata(pdev);
3471 unsigned long flags;
3472
3473 if (!priv)
3474 return;
3475
3476 wait_for_completion(&priv->_4965.firmware_loading_complete);
3477
3478 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3479
3480 iwl_legacy_dbgfs_unregister(priv);
3481 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
3482
3483	/* The ieee80211_unregister_hw() call will cause iwl_mac_stop() to
3484	 * be called and iwl4965_down() to run; since we are removing the
3485	 * device we need to set the STATUS_EXIT_PENDING bit first.
3486	 */
3487 set_bit(STATUS_EXIT_PENDING, &priv->status);
3488
3489 iwl_legacy_leds_exit(priv);
3490
3491 if (priv->mac80211_registered) {
3492 ieee80211_unregister_hw(priv->hw);
3493 priv->mac80211_registered = 0;
3494 } else {
3495 iwl4965_down(priv);
3496 }
3497
3498 /*
3499 * Make sure device is reset to low power before unloading driver.
3500 * This may be redundant with iwl4965_down(), but there are paths to
3501 * run iwl4965_down() without calling apm_ops.stop(), and there are
3502 * paths to avoid running iwl4965_down() at all before leaving driver.
3503 * This (inexpensive) call *makes sure* device is reset.
3504 */
3505 iwl_legacy_apm_stop(priv);
3506
3507 /* make sure we flush any pending irq or
3508 * tasklet for the driver
3509 */
3510 spin_lock_irqsave(&priv->lock, flags);
3511 iwl_legacy_disable_interrupts(priv);
3512 spin_unlock_irqrestore(&priv->lock, flags);
3513
3514 iwl4965_synchronize_irq(priv);
3515
3516 iwl4965_dealloc_ucode_pci(priv);
3517
3518 if (priv->rxq.bd)
3519 iwl4965_rx_queue_free(priv, &priv->rxq);
3520 iwl4965_hw_txq_ctx_free(priv);
3521
3522 iwl_legacy_eeprom_free(priv);
3523
3524
3525 /*netif_stop_queue(dev); */
3526 flush_workqueue(priv->workqueue);
3527
3528 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
3529 * priv->workqueue... so we can't take down the workqueue
3530 * until now... */
3531 destroy_workqueue(priv->workqueue);
3532 priv->workqueue = NULL;
3533 iwl_legacy_free_traffic_mem(priv);
3534
3535 free_irq(priv->pci_dev->irq, priv);
3536 pci_disable_msi(priv->pci_dev);
3537 pci_iounmap(pdev, priv->hw_base);
3538 pci_release_regions(pdev);
3539 pci_disable_device(pdev);
3540 pci_set_drvdata(pdev, NULL);
3541
3542 iwl4965_uninit_drv(priv);
3543
3544 dev_kfree_skb(priv->beacon_skb);
3545
3546 ieee80211_free_hw(priv->hw);
3547}
3548
3549/*
3550 * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
3551 * Must be called with priv->lock held and MAC access enabled.
3552 */
3553void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
3554{
3555 iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
3556}
3557
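/*
 * Hedged usage sketch, not part of this file: the scheduler write above is
 * expected to run with priv->lock held and MAC access enabled; the wrapper
 * below is illustrative only, and a mask of 0 is just one example value
 * (it deactivates all Tx FIFO channels).
 */
static void example_stop_tx_sched(struct iwl_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	iwl4965_txq_set_sched(priv, 0);
	spin_unlock_irqrestore(&priv->lock, flags);
}
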
3558/*****************************************************************************
3559 *
3560 * driver and module entry point
3561 *
3562 *****************************************************************************/
3563
3564/* Hardware specific file defines the PCI IDs table for that hardware module */
3565static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
3566#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
3567 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
3568 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
3569#endif /* CONFIG_IWL4965 */
3570
3571 {0}
3572};
3573MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
3574
3575static struct pci_driver iwl4965_driver = {
3576 .name = DRV_NAME,
3577 .id_table = iwl4965_hw_card_ids,
3578 .probe = iwl4965_pci_probe,
3579 .remove = __devexit_p(iwl4965_pci_remove),
3580 .driver.pm = IWL_LEGACY_PM_OPS,
3581};
3582
3583static int __init iwl4965_init(void)
3584{
3585
3586 int ret;
3587 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3588 pr_info(DRV_COPYRIGHT "\n");
3589
3590 ret = iwl4965_rate_control_register();
3591 if (ret) {
3592 pr_err("Unable to register rate control algorithm: %d\n", ret);
3593 return ret;
3594 }
3595
3596 ret = pci_register_driver(&iwl4965_driver);
3597 if (ret) {
3598 pr_err("Unable to initialize PCI module\n");
3599 goto error_register;
3600 }
3601
3602 return ret;
3603
3604error_register:
3605 iwl4965_rate_control_unregister();
3606 return ret;
3607}
3608
3609static void __exit iwl4965_exit(void)
3610{
3611 pci_unregister_driver(&iwl4965_driver);
3612 iwl4965_rate_control_unregister();
3613}
3614
3615module_exit(iwl4965_exit);
3616module_init(iwl4965_init);
3617
3618#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3619module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
3620MODULE_PARM_DESC(debug, "debug output mask");
3621#endif
3622
3623module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
3624MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
3625module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
3626MODULE_PARM_DESC(queues_num, "number of hw queues.");
3627module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
3628MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
3629module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
3630 int, S_IRUGO);
3631MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3632module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
3633MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
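Editorial note: the module_param_named() lines above bind each parameter to a field of iwl4965_mod_params. The sketch below shows roughly what that structure must contain; the field names are taken from the code above, while the comments and any extra fields in the real definition are assumptions.

struct iwl_mod_params {
	int sw_crypto;		/* swcrypto: 0 = hardware crypto (default) */
	int num_of_queues;	/* queues_num: number of HW Tx queues */
	int disable_11n;	/* 11n_disable: non-zero disables 802.11n */
	int amsdu_size_8K;	/* amsdu_size_8K: enable 8K A-MSDU size */
	int restart_fw;		/* fw_restart: restart firmware on error */
	/* the real structure may carry further fields (antenna selection, etc.) */
};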
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index e1e3b1cf3cff..17d555f2215a 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,18 +1,52 @@
1config IWLWIFI 1config IWLAGN
2 tristate "Intel Wireless Wifi" 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) "
3 depends on PCI && MAC80211 3 depends on PCI && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 select NEW_LEDS 5 select NEW_LEDS
6 select LEDS_CLASS 6 select LEDS_CLASS
7 select LEDS_TRIGGERS 7 select LEDS_TRIGGERS
8 select MAC80211_LEDS 8 select MAC80211_LEDS
9 ---help---
10 Select to build the driver supporting the:
11
12 Intel Wireless WiFi Link Next-Gen AGN
13
14 This option enables support for use with the following hardware:
15 Intel Wireless WiFi Link 6250AGN Adapter
16 Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
17 Intel WiFi Link 1000BGN
18 Intel Wireless WiFi 5150AGN
19 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
20 Intel 6005 Series Wi-Fi Adapters
21 Intel 6030 Series Wi-Fi Adapters
22 Intel Wireless WiFi Link 6150BGN 2 Adapter
23 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
24 Intel 2000 Series Wi-Fi Adapters
25
26
27 This driver uses the kernel's mac80211 subsystem.
28
29 In order to use this driver, you will need a microcode (uCode)
30 image for it. You can obtain the microcode from:
31
32 <http://intellinuxwireless.org/>.
33
34 The microcode is typically installed in /lib/firmware. You can
35 look in the hotplug script /etc/hotplug/firmware.agent to
36 determine which directory FIRMWARE_DIR is set to when the script
37 runs.
38
39 If you want to compile the driver as a module ( = code which can be
40 inserted in and removed from the running kernel whenever you want),
41 say M here and read <file:Documentation/kbuild/modules.txt>. The
42 module will be called iwlagn.
9 43
10menu "Debugging Options" 44menu "Debugging Options"
11 depends on IWLWIFI 45 depends on IWLAGN
12 46
13config IWLWIFI_DEBUG 47config IWLWIFI_DEBUG
14 bool "Enable full debugging output in iwlagn and iwl3945 drivers" 48 bool "Enable full debugging output in the iwlagn driver"
15 depends on IWLWIFI 49 depends on IWLAGN
16 ---help--- 50 ---help---
17 This option will enable debug tracing output for the iwlwifi drivers 51 This option will enable debug tracing output for the iwlwifi drivers
18 52
@@ -37,7 +71,7 @@ config IWLWIFI_DEBUG
37 71
38config IWLWIFI_DEBUGFS 72config IWLWIFI_DEBUGFS
39 bool "iwlagn debugfs support" 73 bool "iwlagn debugfs support"
40 depends on IWLWIFI && MAC80211_DEBUGFS 74 depends on IWLAGN && MAC80211_DEBUGFS
41 ---help--- 75 ---help---
42 Enable creation of debugfs files for the iwlwifi drivers. This 76 Enable creation of debugfs files for the iwlwifi drivers. This
43 is a low-impact option that allows getting insight into the 77 is a low-impact option that allows getting insight into the
@@ -45,13 +79,13 @@ config IWLWIFI_DEBUGFS
45 79
46config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE 80config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
47 bool "Experimental uCode support" 81 bool "Experimental uCode support"
48 depends on IWLWIFI && IWLWIFI_DEBUG 82 depends on IWLAGN && IWLWIFI_DEBUG
49 ---help--- 83 ---help---
50 Enable use of experimental ucode for testing and debugging. 84 Enable use of experimental ucode for testing and debugging.
51 85
52config IWLWIFI_DEVICE_TRACING 86config IWLWIFI_DEVICE_TRACING
53 bool "iwlwifi device access tracing" 87 bool "iwlwifi device access tracing"
54 depends on IWLWIFI 88 depends on IWLAGN
55 depends on EVENT_TRACING 89 depends on EVENT_TRACING
56 help 90 help
57 Say Y here to trace all commands, including TX frames and IO 91 Say Y here to trace all commands, including TX frames and IO
@@ -68,57 +102,9 @@ config IWLWIFI_DEVICE_TRACING
68 occur. 102 occur.
69endmenu 103endmenu
70 104
71config IWLAGN
72 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
73 depends on IWLWIFI
74 ---help---
75 Select to build the driver supporting the:
76
77 Intel Wireless WiFi Link Next-Gen AGN
78
79 This driver uses the kernel's mac80211 subsystem.
80
81 In order to use this driver, you will need a microcode (uCode)
82 image for it. You can obtain the microcode from:
83
84 <http://intellinuxwireless.org/>.
85
86 The microcode is typically installed in /lib/firmware. You can
87 look in the hotplug script /etc/hotplug/firmware.agent to
88 determine which directory FIRMWARE_DIR is set to when the script
89 runs.
90
91 If you want to compile the driver as a module ( = code which can be
92 inserted in and removed from the running kernel whenever you want),
93 say M here and read <file:Documentation/kbuild/modules.txt>. The
94 module will be called iwlagn.
95
96
97config IWL4965
98 bool "Intel Wireless WiFi 4965AGN"
99 depends on IWLAGN
100 ---help---
101 This option enables support for Intel Wireless WiFi Link 4965AGN
102
103config IWL5000
104 bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link"
105 depends on IWLAGN
106 ---help---
107 This option enables support for use with the following hardware:
108 Intel Wireless WiFi Link 6250AGN Adapter
109 Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
110 Intel WiFi Link 1000BGN
111 Intel Wireless WiFi 5150AGN
112 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
113 Intel 6005 Series Wi-Fi Adapters
114 Intel 6030 Series Wi-Fi Adapters
115 Intel Wireless WiFi Link 6150BGN 2 Adapter
116 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
117 Intel 2000 Series Wi-Fi Adapters
118
119config IWL_P2P 105config IWL_P2P
120 bool "iwlwifi experimental P2P support" 106 bool "iwlwifi experimental P2P support"
121 depends on IWL5000 107 depends on IWLAGN
122 help 108 help
123 This option enables experimental P2P support for some devices 109 This option enables experimental P2P support for some devices
124 based on microcode support. Since P2P support is still under 110 based on microcode support. Since P2P support is still under
@@ -132,27 +118,3 @@ config IWL_P2P
132 118
133 Say Y only if you want to experiment with P2P. 119 Say Y only if you want to experiment with P2P.
134 120
135config IWL3945
136 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
137 depends on IWLWIFI
138 ---help---
139 Select to build the driver supporting the:
140
141 Intel PRO/Wireless 3945ABG/BG Network Connection
142
143 This driver uses the kernel's mac80211 subsystem.
144
145 In order to use this driver, you will need a microcode (uCode)
146 image for it. You can obtain the microcode from:
147
148 <http://intellinuxwireless.org/>.
149
150 The microcode is typically installed in /lib/firmware. You can
151 look in the hotplug script /etc/hotplug/firmware.agent to
152 determine which directory FIRMWARE_DIR is set to when the script
153 runs.
154
155 If you want to compile the driver as a module ( = code which can be
156 inserted in and removed from the running kernel whenever you want),
157 say M here and read <file:Documentation/kbuild/modules.txt>. The
158 module will be called iwl3945.
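Editorial note: this hunk leaves IWLAGN as the only user-visible tristate in this Kconfig and drops the IWL4965, IWL5000 and IWL3945 sub-options from the file. In C, a tristate symbol is visible either as CONFIG_<SYM> (built in) or CONFIG_<SYM>_MODULE (built as =m), which is the same convention the 4965 PCI ID table earlier in this patch checks for. A minimal, hypothetical illustration:

#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE)
/* reachable whenever the iwlagn driver is enabled, whether =y or =m */
#endif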
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 25be742c69c9..aab7d15bd5ed 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,36 +1,23 @@
1obj-$(CONFIG_IWLWIFI) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
4iwlcore-objs += iwl-scan.o iwl-led.o
5iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o
6iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o
7iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
8iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
9
10# If 3945 is selected only, iwl-legacy.o will be added
11# to iwlcore-m above, but it needs to be built in.
12iwlcore-objs += $(iwlcore-m)
13
14CFLAGS_iwl-devtrace.o := -I$(src)
15
16# AGN 1# AGN
17obj-$(CONFIG_IWLAGN) += iwlagn.o 2obj-$(CONFIG_IWLAGN) += iwlagn.o
18iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o 3iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
19iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o 4iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
20iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o 5iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
21iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o 6iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
22iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
23 7
24iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 8iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
25iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o 9iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o
26iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 10iwlagn-objs += iwl-scan.o iwl-led.o
27iwlagn-$(CONFIG_IWL5000) += iwl-6000.o 11iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
28iwlagn-$(CONFIG_IWL5000) += iwl-1000.o 12iwlagn-objs += iwl-5000.o
29iwlagn-$(CONFIG_IWL5000) += iwl-2000.o 13iwlagn-objs += iwl-6000.o
14iwlagn-objs += iwl-1000.o
15iwlagn-objs += iwl-2000.o
16
17iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
18iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
19iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
30 20
31# 3945 21CFLAGS_iwl-devtrace.o := -I$(src)
32obj-$(CONFIG_IWL3945) += iwl3945.o
33iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
34iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
35 22
36ccflags-y += -D__CHECK_ENDIAN__ 23ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 9965215697bb..d08fa938501a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -86,7 +86,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
86MODULE_VERSION(DRV_VERSION); 86MODULE_VERSION(DRV_VERSION);
87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
88MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
89MODULE_ALIAS("iwl4965");
90 89
91static int iwlagn_ant_coupling; 90static int iwlagn_ant_coupling;
92static bool iwlagn_bt_ch_announce = 1; 91static bool iwlagn_bt_ch_announce = 1;
@@ -3810,7 +3809,6 @@ static void iwlagn_bg_roc_done(struct work_struct *work)
3810 mutex_unlock(&priv->mutex); 3809 mutex_unlock(&priv->mutex);
3811} 3810}
3812 3811
3813#ifdef CONFIG_IWL5000
3814static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw, 3812static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3815 struct ieee80211_channel *channel, 3813 struct ieee80211_channel *channel,
3816 enum nl80211_channel_type channel_type, 3814 enum nl80211_channel_type channel_type,
@@ -3866,7 +3864,6 @@ static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
3866 3864
3867 return 0; 3865 return 0;
3868} 3866}
3869#endif
3870 3867
3871/***************************************************************************** 3868/*****************************************************************************
3872 * 3869 *
@@ -4036,7 +4033,6 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
4036 kfree(priv->scan_cmd); 4033 kfree(priv->scan_cmd);
4037} 4034}
4038 4035
4039#ifdef CONFIG_IWL5000
4040struct ieee80211_ops iwlagn_hw_ops = { 4036struct ieee80211_ops iwlagn_hw_ops = {
4041 .tx = iwlagn_mac_tx, 4037 .tx = iwlagn_mac_tx,
4042 .start = iwlagn_mac_start, 4038 .start = iwlagn_mac_start,
@@ -4061,7 +4057,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
4061 .remain_on_channel = iwl_mac_remain_on_channel, 4057 .remain_on_channel = iwl_mac_remain_on_channel,
4062 .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel, 4058 .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
4063}; 4059};
4064#endif
4065 4060
4066static void iwl_hw_detect(struct iwl_priv *priv) 4061static void iwl_hw_detect(struct iwl_priv *priv)
4067{ 4062{
@@ -4129,12 +4124,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4129 if (cfg->mod_params->disable_hw_scan) { 4124 if (cfg->mod_params->disable_hw_scan) {
4130 dev_printk(KERN_DEBUG, &(pdev->dev), 4125 dev_printk(KERN_DEBUG, &(pdev->dev),
4131 "sw scan support is deprecated\n"); 4126 "sw scan support is deprecated\n");
4132#ifdef CONFIG_IWL5000
4133 iwlagn_hw_ops.hw_scan = NULL; 4127 iwlagn_hw_ops.hw_scan = NULL;
4134#endif
4135#ifdef CONFIG_IWL4965
4136 iwl4965_hw_ops.hw_scan = NULL;
4137#endif
4138 } 4128 }
4139 4129
4140 hw = iwl_alloc_all(cfg); 4130 hw = iwl_alloc_all(cfg);
@@ -4513,12 +4503,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4513 4503
4514/* Hardware specific file defines the PCI IDs table for that hardware module */ 4504/* Hardware specific file defines the PCI IDs table for that hardware module */
4515static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { 4505static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4516#ifdef CONFIG_IWL4965
4517 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
4518 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
4519#endif /* CONFIG_IWL4965 */
4520#ifdef CONFIG_IWL5000
4521/* 5100 Series WiFi */
4522 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ 4506 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
4523 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ 4507 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
4524 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ 4508 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -4704,8 +4688,6 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4704 {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)}, 4688 {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
4705 {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)}, 4689 {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
4706 4690
4707#endif /* CONFIG_IWL5000 */
4708
4709 {0} 4691 {0}
4710}; 4692};
4711MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 4693MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
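Editorial note: with the CONFIG_IWL4965/CONFIG_IWL5000 guards gone, the deprecated disable_hw_scan handling above only needs to clear iwlagn_hw_ops.hw_scan; mac80211 falls back to its software scan when that callback is NULL. A minimal sketch of the idea (the function and flag names here are illustrative, not from the patch):

#include <net/mac80211.h>

/* Clearing .hw_scan before ieee80211 registration makes mac80211 scan in software. */
static void example_pick_scan_mode(struct ieee80211_ops *ops, bool disable_hw_scan)
{
	if (disable_hw_scan)
		ops->hw_scan = NULL;
}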
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 977ddfb8c24c..4bd342060254 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -43,11 +43,6 @@
43#include "iwl-helpers.h" 43#include "iwl-helpers.h"
44 44
45 45
46MODULE_DESCRIPTION("iwl core");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/* 46/*
52 * set bt_coex_active to true, uCode will do kill/defer 47 * set bt_coex_active to true, uCode will do kill/defer
53 * every time the priority line is asserted (BT is sending signals on the 48 * every time the priority line is asserted (BT is sending signals on the
@@ -65,15 +60,12 @@ MODULE_LICENSE("GPL");
65 * default: bt_coex_active = true (BT_COEX_ENABLE) 60 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */ 61 */
67bool bt_coex_active = true; 62bool bt_coex_active = true;
68EXPORT_SYMBOL_GPL(bt_coex_active);
69module_param(bt_coex_active, bool, S_IRUGO); 63module_param(bt_coex_active, bool, S_IRUGO);
70MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 64MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
71 65
72u32 iwl_debug_level; 66u32 iwl_debug_level;
73EXPORT_SYMBOL(iwl_debug_level);
74 67
75const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 68const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
76EXPORT_SYMBOL(iwl_bcast_addr);
77 69
78 70
79/* This function both allocates and initializes hw and priv. */ 71/* This function both allocates and initializes hw and priv. */
@@ -98,7 +90,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
98out: 90out:
99 return hw; 91 return hw;
100} 92}
101EXPORT_SYMBOL(iwl_alloc_all);
102 93
103#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 94#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
104#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 95#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -272,7 +263,6 @@ int iwlcore_init_geos(struct iwl_priv *priv)
272 263
273 return 0; 264 return 0;
274} 265}
275EXPORT_SYMBOL(iwlcore_init_geos);
276 266
277/* 267/*
278 * iwlcore_free_geos - undo allocations in iwlcore_init_geos 268 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
@@ -283,7 +273,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
283 kfree(priv->ieee_rates); 273 kfree(priv->ieee_rates);
284 clear_bit(STATUS_GEO_CONFIGURED, &priv->status); 274 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
285} 275}
286EXPORT_SYMBOL(iwlcore_free_geos);
287 276
288static bool iwl_is_channel_extension(struct iwl_priv *priv, 277static bool iwl_is_channel_extension(struct iwl_priv *priv,
289 enum ieee80211_band band, 278 enum ieee80211_band band,
@@ -328,7 +317,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
328 le16_to_cpu(ctx->staging.channel), 317 le16_to_cpu(ctx->staging.channel),
329 ctx->ht.extension_chan_offset); 318 ctx->ht.extension_chan_offset);
330} 319}
331EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
332 320
333static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) 321static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
334{ 322{
@@ -429,7 +417,6 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
429 return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd, 417 return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
430 sizeof(ctx->timing), &ctx->timing); 418 sizeof(ctx->timing), &ctx->timing);
431} 419}
432EXPORT_SYMBOL(iwl_send_rxon_timing);
433 420
434void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 421void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
435 int hw_decrypt) 422 int hw_decrypt)
@@ -442,7 +429,6 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
442 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; 429 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
443 430
444} 431}
445EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
446 432
447/* validate RXON structure is valid */ 433/* validate RXON structure is valid */
448int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 434int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
@@ -515,7 +501,6 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
515 } 501 }
516 return 0; 502 return 0;
517} 503}
518EXPORT_SYMBOL(iwl_check_rxon_cmd);
519 504
520/** 505/**
521 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 506 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
@@ -579,7 +564,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv,
579 564
580 return 0; 565 return 0;
581} 566}
582EXPORT_SYMBOL(iwl_full_rxon_required);
583 567
584u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv, 568u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
585 struct iwl_rxon_context *ctx) 569 struct iwl_rxon_context *ctx)
@@ -593,7 +577,6 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
593 else 577 else
594 return IWL_RATE_6M_PLCP; 578 return IWL_RATE_6M_PLCP;
595} 579}
596EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
597 580
598static void _iwl_set_rxon_ht(struct iwl_priv *priv, 581static void _iwl_set_rxon_ht(struct iwl_priv *priv,
599 struct iwl_ht_config *ht_conf, 582 struct iwl_ht_config *ht_conf,
@@ -670,7 +653,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
670 for_each_context(priv, ctx) 653 for_each_context(priv, ctx)
671 _iwl_set_rxon_ht(priv, ht_conf, ctx); 654 _iwl_set_rxon_ht(priv, ht_conf, ctx);
672} 655}
673EXPORT_SYMBOL(iwl_set_rxon_ht);
674 656
675/* Return valid, unused, channel for a passive scan to reset the RF */ 657/* Return valid, unused, channel for a passive scan to reset the RF */
676u8 iwl_get_single_channel_number(struct iwl_priv *priv, 658u8 iwl_get_single_channel_number(struct iwl_priv *priv,
@@ -711,7 +693,6 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
711 693
712 return channel; 694 return channel;
713} 695}
714EXPORT_SYMBOL(iwl_get_single_channel_number);
715 696
716/** 697/**
717 * iwl_set_rxon_channel - Set the band and channel values in staging RXON 698 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
@@ -742,7 +723,6 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
742 723
743 return 0; 724 return 0;
744} 725}
745EXPORT_SYMBOL(iwl_set_rxon_channel);
746 726
747void iwl_set_flags_for_band(struct iwl_priv *priv, 727void iwl_set_flags_for_band(struct iwl_priv *priv,
748 struct iwl_rxon_context *ctx, 728 struct iwl_rxon_context *ctx,
@@ -766,7 +746,6 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
766 ctx->staging.flags &= ~RXON_FLG_CCK_MSK; 746 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
767 } 747 }
768} 748}
769EXPORT_SYMBOL(iwl_set_flags_for_band);
770 749
771/* 750/*
772 * initialize rxon structure with default values from eeprom 751 * initialize rxon structure with default values from eeprom
@@ -838,7 +817,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
838 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; 817 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
839 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; 818 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
840} 819}
841EXPORT_SYMBOL(iwl_connection_init_rx_config);
842 820
843void iwl_set_rate(struct iwl_priv *priv) 821void iwl_set_rate(struct iwl_priv *priv)
844{ 822{
@@ -871,7 +849,6 @@ void iwl_set_rate(struct iwl_priv *priv)
871 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 849 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
872 } 850 }
873} 851}
874EXPORT_SYMBOL(iwl_set_rate);
875 852
876void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) 853void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
877{ 854{
@@ -891,7 +868,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
891 mutex_unlock(&priv->mutex); 868 mutex_unlock(&priv->mutex);
892 } 869 }
893} 870}
894EXPORT_SYMBOL(iwl_chswitch_done);
895 871
896void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 872void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
897{ 873{
@@ -919,7 +895,6 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
919 } 895 }
920 } 896 }
921} 897}
922EXPORT_SYMBOL(iwl_rx_csa);
923 898
924#ifdef CONFIG_IWLWIFI_DEBUG 899#ifdef CONFIG_IWLWIFI_DEBUG
925void iwl_print_rx_config_cmd(struct iwl_priv *priv, 900void iwl_print_rx_config_cmd(struct iwl_priv *priv,
@@ -941,7 +916,6 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
941 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); 916 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
942 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 917 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
943} 918}
944EXPORT_SYMBOL(iwl_print_rx_config_cmd);
945#endif 919#endif
946/** 920/**
947 * iwl_irq_handle_error - called for HW or SW error interrupt from card 921 * iwl_irq_handle_error - called for HW or SW error interrupt from card
@@ -1021,7 +995,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1021 queue_work(priv->workqueue, &priv->restart); 995 queue_work(priv->workqueue, &priv->restart);
1022 } 996 }
1023} 997}
1024EXPORT_SYMBOL(iwl_irq_handle_error);
1025 998
1026static int iwl_apm_stop_master(struct iwl_priv *priv) 999static int iwl_apm_stop_master(struct iwl_priv *priv)
1027{ 1000{
@@ -1058,7 +1031,6 @@ void iwl_apm_stop(struct iwl_priv *priv)
1058 */ 1031 */
1059 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1032 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1060} 1033}
1061EXPORT_SYMBOL(iwl_apm_stop);
1062 1034
1063 1035
1064/* 1036/*
@@ -1173,7 +1145,6 @@ int iwl_apm_init(struct iwl_priv *priv)
1173out: 1145out:
1174 return ret; 1146 return ret;
1175} 1147}
1176EXPORT_SYMBOL(iwl_apm_init);
1177 1148
1178 1149
1179int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) 1150int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
@@ -1233,7 +1204,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1233 } 1204 }
1234 return ret; 1205 return ret;
1235} 1206}
1236EXPORT_SYMBOL(iwl_set_tx_power);
1237 1207
1238void iwl_send_bt_config(struct iwl_priv *priv) 1208void iwl_send_bt_config(struct iwl_priv *priv)
1239{ 1209{
@@ -1257,7 +1227,6 @@ void iwl_send_bt_config(struct iwl_priv *priv)
1257 sizeof(struct iwl_bt_cmd), &bt_cmd)) 1227 sizeof(struct iwl_bt_cmd), &bt_cmd))
1258 IWL_ERR(priv, "failed to send BT Coex Config\n"); 1228 IWL_ERR(priv, "failed to send BT Coex Config\n");
1259} 1229}
1260EXPORT_SYMBOL(iwl_send_bt_config);
1261 1230
1262int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) 1231int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1263{ 1232{
@@ -1275,7 +1244,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1275 sizeof(struct iwl_statistics_cmd), 1244 sizeof(struct iwl_statistics_cmd),
1276 &statistics_cmd); 1245 &statistics_cmd);
1277} 1246}
1278EXPORT_SYMBOL(iwl_send_statistics_request);
1279 1247
1280void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, 1248void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
1281 struct iwl_rx_mem_buffer *rxb) 1249 struct iwl_rx_mem_buffer *rxb)
@@ -1287,7 +1255,6 @@ void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
1287 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 1255 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1288#endif 1256#endif
1289} 1257}
1290EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
1291 1258
1292void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 1259void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1293 struct iwl_rx_mem_buffer *rxb) 1260 struct iwl_rx_mem_buffer *rxb)
@@ -1299,7 +1266,6 @@ void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1299 get_cmd_string(pkt->hdr.cmd)); 1266 get_cmd_string(pkt->hdr.cmd));
1300 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); 1267 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1301} 1268}
1302EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
1303 1269
1304void iwl_rx_reply_error(struct iwl_priv *priv, 1270void iwl_rx_reply_error(struct iwl_priv *priv,
1305 struct iwl_rx_mem_buffer *rxb) 1271 struct iwl_rx_mem_buffer *rxb)
@@ -1314,7 +1280,6 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
1314 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), 1280 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
1315 le32_to_cpu(pkt->u.err_resp.error_info)); 1281 le32_to_cpu(pkt->u.err_resp.error_info));
1316} 1282}
1317EXPORT_SYMBOL(iwl_rx_reply_error);
1318 1283
1319void iwl_clear_isr_stats(struct iwl_priv *priv) 1284void iwl_clear_isr_stats(struct iwl_priv *priv)
1320{ 1285{
@@ -1366,7 +1331,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1366 IWL_DEBUG_MAC80211(priv, "leave\n"); 1331 IWL_DEBUG_MAC80211(priv, "leave\n");
1367 return 0; 1332 return 0;
1368} 1333}
1369EXPORT_SYMBOL(iwl_mac_conf_tx);
1370 1334
1371int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw) 1335int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1372{ 1336{
@@ -1374,7 +1338,6 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1374 1338
1375 return priv->ibss_manager == IWL_IBSS_MANAGER; 1339 return priv->ibss_manager == IWL_IBSS_MANAGER;
1376} 1340}
1377EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
1378 1341
1379static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 1342static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1380{ 1343{
@@ -1484,7 +1447,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1484 IWL_DEBUG_MAC80211(priv, "leave\n"); 1447 IWL_DEBUG_MAC80211(priv, "leave\n");
1485 return err; 1448 return err;
1486} 1449}
1487EXPORT_SYMBOL(iwl_mac_add_interface);
1488 1450
1489static void iwl_teardown_interface(struct iwl_priv *priv, 1451static void iwl_teardown_interface(struct iwl_priv *priv,
1490 struct ieee80211_vif *vif, 1452 struct ieee80211_vif *vif,
@@ -1537,7 +1499,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1537 IWL_DEBUG_MAC80211(priv, "leave\n"); 1499 IWL_DEBUG_MAC80211(priv, "leave\n");
1538 1500
1539} 1501}
1540EXPORT_SYMBOL(iwl_mac_remove_interface);
1541 1502
1542int iwl_alloc_txq_mem(struct iwl_priv *priv) 1503int iwl_alloc_txq_mem(struct iwl_priv *priv)
1543{ 1504{
@@ -1552,14 +1513,12 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
1552 } 1513 }
1553 return 0; 1514 return 0;
1554} 1515}
1555EXPORT_SYMBOL(iwl_alloc_txq_mem);
1556 1516
1557void iwl_free_txq_mem(struct iwl_priv *priv) 1517void iwl_free_txq_mem(struct iwl_priv *priv)
1558{ 1518{
1559 kfree(priv->txq); 1519 kfree(priv->txq);
1560 priv->txq = NULL; 1520 priv->txq = NULL;
1561} 1521}
1562EXPORT_SYMBOL(iwl_free_txq_mem);
1563 1522
1564#ifdef CONFIG_IWLWIFI_DEBUGFS 1523#ifdef CONFIG_IWLWIFI_DEBUGFS
1565 1524
@@ -1598,7 +1557,6 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
1598 iwl_reset_traffic_log(priv); 1557 iwl_reset_traffic_log(priv);
1599 return 0; 1558 return 0;
1600} 1559}
1601EXPORT_SYMBOL(iwl_alloc_traffic_mem);
1602 1560
1603void iwl_free_traffic_mem(struct iwl_priv *priv) 1561void iwl_free_traffic_mem(struct iwl_priv *priv)
1604{ 1562{
@@ -1608,7 +1566,6 @@ void iwl_free_traffic_mem(struct iwl_priv *priv)
1608 kfree(priv->rx_traffic); 1566 kfree(priv->rx_traffic);
1609 priv->rx_traffic = NULL; 1567 priv->rx_traffic = NULL;
1610} 1568}
1611EXPORT_SYMBOL(iwl_free_traffic_mem);
1612 1569
1613void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, 1570void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1614 u16 length, struct ieee80211_hdr *header) 1571 u16 length, struct ieee80211_hdr *header)
@@ -1633,7 +1590,6 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1633 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; 1590 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1634 } 1591 }
1635} 1592}
1636EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
1637 1593
1638void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, 1594void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1639 u16 length, struct ieee80211_hdr *header) 1595 u16 length, struct ieee80211_hdr *header)
@@ -1658,7 +1614,6 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1658 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; 1614 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1659 } 1615 }
1660} 1616}
1661EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
1662 1617
1663const char *get_mgmt_string(int cmd) 1618const char *get_mgmt_string(int cmd)
1664{ 1619{
@@ -1795,7 +1750,6 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1795 stats->data_bytes += len; 1750 stats->data_bytes += len;
1796 } 1751 }
1797} 1752}
1798EXPORT_SYMBOL(iwl_update_stats);
1799#endif 1753#endif
1800 1754
1801static void iwl_force_rf_reset(struct iwl_priv *priv) 1755static void iwl_force_rf_reset(struct iwl_priv *priv)
@@ -1934,7 +1888,6 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1934 mutex_unlock(&priv->mutex); 1888 mutex_unlock(&priv->mutex);
1935 return err; 1889 return err;
1936} 1890}
1937EXPORT_SYMBOL(iwl_mac_change_interface);
1938 1891
1939/* 1892/*
1940 * On every watchdog tick we check (latest) time stamp. If it does not 1893 * On every watchdog tick we check (latest) time stamp. If it does not
@@ -2006,7 +1959,6 @@ void iwl_bg_watchdog(unsigned long data)
2006 mod_timer(&priv->watchdog, jiffies + 1959 mod_timer(&priv->watchdog, jiffies +
2007 msecs_to_jiffies(IWL_WD_TICK(timeout))); 1960 msecs_to_jiffies(IWL_WD_TICK(timeout)));
2008} 1961}
2009EXPORT_SYMBOL(iwl_bg_watchdog);
2010 1962
2011void iwl_setup_watchdog(struct iwl_priv *priv) 1963void iwl_setup_watchdog(struct iwl_priv *priv)
2012{ 1964{
@@ -2018,7 +1970,6 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
2018 else 1970 else
2019 del_timer(&priv->watchdog); 1971 del_timer(&priv->watchdog);
2020} 1972}
2021EXPORT_SYMBOL(iwl_setup_watchdog);
2022 1973
2023/* 1974/*
2024 * extended beacon time format 1975 * extended beacon time format
@@ -2044,7 +1995,6 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
2044 1995
2045 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; 1996 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
2046} 1997}
2047EXPORT_SYMBOL(iwl_usecs_to_beacons);
2048 1998
2049/* base is usually what we get from ucode with each received frame, 1999/* base is usually what we get from ucode with each received frame,
2050 * the same as HW timer counter counting down 2000 * the same as HW timer counter counting down
@@ -2072,7 +2022,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
2072 2022
2073 return cpu_to_le32(res); 2023 return cpu_to_le32(res);
2074} 2024}
2075EXPORT_SYMBOL(iwl_add_beacon_time);
2076 2025
2077#ifdef CONFIG_PM 2026#ifdef CONFIG_PM
2078 2027
@@ -2092,7 +2041,6 @@ int iwl_pci_suspend(struct device *device)
2092 2041
2093 return 0; 2042 return 0;
2094} 2043}
2095EXPORT_SYMBOL(iwl_pci_suspend);
2096 2044
2097int iwl_pci_resume(struct device *device) 2045int iwl_pci_resume(struct device *device)
2098{ 2046{
@@ -2121,7 +2069,6 @@ int iwl_pci_resume(struct device *device)
2121 2069
2122 return 0; 2070 return 0;
2123} 2071}
2124EXPORT_SYMBOL(iwl_pci_resume);
2125 2072
2126const struct dev_pm_ops iwl_pm_ops = { 2073const struct dev_pm_ops iwl_pm_ops = {
2127 .suspend = iwl_pci_suspend, 2074 .suspend = iwl_pci_suspend,
@@ -2131,6 +2078,5 @@ const struct dev_pm_ops iwl_pm_ops = {
2131 .poweroff = iwl_pci_suspend, 2078 .poweroff = iwl_pci_suspend,
2132 .restore = iwl_pci_resume, 2079 .restore = iwl_pci_resume,
2133}; 2080};
2134EXPORT_SYMBOL(iwl_pm_ops);
2135 2081
2136#endif /* CONFIG_PM */ 2082#endif /* CONFIG_PM */
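Editorial note: nearly every hunk in iwl-core.c above (and in the eeprom, hcmd, led and debugfs files that follow) removes an EXPORT_SYMBOL line. With the Makefile change earlier in this patch linking these objects straight into iwlagn.o, callers and callees now live in the same module, so the symbols no longer need to be exported. A schematic reminder, using a hypothetical helper name:

#include <linux/module.h>

/* An export is only required when code in a *different* module calls the symbol. */
int example_shared_helper(void)
{
	return 0;
}
EXPORT_SYMBOL(example_shared_helper);	/* droppable once all callers share the .ko */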
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index bc7a965c18f9..8842411f1cf3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -1788,7 +1788,6 @@ err:
1788 iwl_dbgfs_unregister(priv); 1788 iwl_dbgfs_unregister(priv);
1789 return -ENOMEM; 1789 return -ENOMEM;
1790} 1790}
1791EXPORT_SYMBOL(iwl_dbgfs_register);
1792 1791
1793/** 1792/**
1794 * Remove the debugfs files and directories 1793 * Remove the debugfs files and directories
@@ -1802,7 +1801,6 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1802 debugfs_remove_recursive(priv->debugfs_dir); 1801 debugfs_remove_recursive(priv->debugfs_dir);
1803 priv->debugfs_dir = NULL; 1802 priv->debugfs_dir = NULL;
1804} 1803}
1805EXPORT_SYMBOL(iwl_dbgfs_unregister);
1806 1804
1807 1805
1808 1806
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 065615ee040a..58165c769cf1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,14 +43,14 @@
43#include "iwl-prph.h" 43#include "iwl-prph.h"
44#include "iwl-fh.h" 44#include "iwl-fh.h"
45#include "iwl-debug.h" 45#include "iwl-debug.h"
46#include "iwl-4965-hw.h"
47#include "iwl-3945-hw.h"
48#include "iwl-agn-hw.h" 46#include "iwl-agn-hw.h"
49#include "iwl-led.h" 47#include "iwl-led.h"
50#include "iwl-power.h" 48#include "iwl-power.h"
51#include "iwl-agn-rs.h" 49#include "iwl-agn-rs.h"
52#include "iwl-agn-tt.h" 50#include "iwl-agn-tt.h"
53 51
52#define U32_PAD(n) ((4-(n))&0x3)
53
54struct iwl_tx_queue; 54struct iwl_tx_queue;
55 55
56/* CT-KILL constants */ 56/* CT-KILL constants */
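Editorial note: the iwl-dev.h hunk adds U32_PAD(n), which evaluates to the number of bytes needed to round n up to the next 4-byte boundary (U32_PAD(5) == 3, U32_PAD(8) == 0). A hedged usage sketch, with an illustrative helper name:

#include <linux/types.h>

static inline size_t example_pad_to_u32(size_t len)
{
	return len + U32_PAD(len);	/* e.g. 5 -> 8, 8 -> 8, 12 -> 12 */
}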
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 358cfd7e5af1..833194a2c639 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -222,7 +222,6 @@ const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
222 BUG_ON(offset >= priv->cfg->base_params->eeprom_size); 222 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
223 return &priv->eeprom[offset]; 223 return &priv->eeprom[offset];
224} 224}
225EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
226 225
227static int iwl_init_otp_access(struct iwl_priv *priv) 226static int iwl_init_otp_access(struct iwl_priv *priv)
228{ 227{
@@ -382,7 +381,6 @@ const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
382{ 381{
383 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset); 382 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
384} 383}
385EXPORT_SYMBOL(iwl_eeprom_query_addr);
386 384
387u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) 385u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
388{ 386{
@@ -390,7 +388,6 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
390 return 0; 388 return 0;
391 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); 389 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
392} 390}
393EXPORT_SYMBOL(iwl_eeprom_query16);
394 391
395/** 392/**
396 * iwl_eeprom_init - read EEPROM contents 393 * iwl_eeprom_init - read EEPROM contents
@@ -509,14 +506,12 @@ err:
509alloc_err: 506alloc_err:
510 return ret; 507 return ret;
511} 508}
512EXPORT_SYMBOL(iwl_eeprom_init);
513 509
514void iwl_eeprom_free(struct iwl_priv *priv) 510void iwl_eeprom_free(struct iwl_priv *priv)
515{ 511{
516 kfree(priv->eeprom); 512 kfree(priv->eeprom);
517 priv->eeprom = NULL; 513 priv->eeprom = NULL;
518} 514}
519EXPORT_SYMBOL(iwl_eeprom_free);
520 515
521static void iwl_init_band_reference(const struct iwl_priv *priv, 516static void iwl_init_band_reference(const struct iwl_priv *priv,
522 int eep_band, int *eeprom_ch_count, 517 int eep_band, int *eeprom_ch_count,
@@ -779,7 +774,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
779 774
780 return 0; 775 return 0;
781} 776}
782EXPORT_SYMBOL(iwl_init_channel_map);
783 777
784/* 778/*
785 * iwl_free_channel_map - undo allocations in iwl_init_channel_map 779 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
@@ -789,7 +783,6 @@ void iwl_free_channel_map(struct iwl_priv *priv)
789 kfree(priv->channel_info); 783 kfree(priv->channel_info);
790 priv->channel_count = 0; 784 priv->channel_count = 0;
791} 785}
792EXPORT_SYMBOL(iwl_free_channel_map);
793 786
794/** 787/**
795 * iwl_get_channel_info - Find driver's private channel info 788 * iwl_get_channel_info - Find driver's private channel info
@@ -818,4 +811,3 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
818 811
819 return NULL; 812 return NULL;
820} 813}
821EXPORT_SYMBOL(iwl_get_channel_info);
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index e4b953d7b7bf..02499f684683 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -114,7 +114,6 @@ const char *get_cmd_string(u8 cmd)
114 114
115 } 115 }
116} 116}
117EXPORT_SYMBOL(get_cmd_string);
118 117
119#define HOST_COMPLETE_TIMEOUT (HZ / 2) 118#define HOST_COMPLETE_TIMEOUT (HZ / 2)
120 119
@@ -253,7 +252,6 @@ out:
253 mutex_unlock(&priv->sync_cmd_mutex); 252 mutex_unlock(&priv->sync_cmd_mutex);
254 return ret; 253 return ret;
255} 254}
256EXPORT_SYMBOL(iwl_send_cmd_sync);
257 255
258int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) 256int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
259{ 257{
@@ -262,7 +260,6 @@ int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
262 260
263 return iwl_send_cmd_sync(priv, cmd); 261 return iwl_send_cmd_sync(priv, cmd);
264} 262}
265EXPORT_SYMBOL(iwl_send_cmd);
266 263
267int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) 264int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
268{ 265{
@@ -274,7 +271,6 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
274 271
275 return iwl_send_cmd_sync(priv, &cmd); 272 return iwl_send_cmd_sync(priv, &cmd);
276} 273}
277EXPORT_SYMBOL(iwl_send_cmd_pdu);
278 274
279int iwl_send_cmd_pdu_async(struct iwl_priv *priv, 275int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
280 u8 id, u16 len, const void *data, 276 u8 id, u16 len, const void *data,
@@ -293,4 +289,3 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
293 289
294 return iwl_send_cmd_async(priv, &cmd); 290 return iwl_send_cmd_async(priv, &cmd);
295} 291}
296EXPORT_SYMBOL(iwl_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 074ad2275228..d7f2a0bb32c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -175,7 +175,6 @@ void iwl_leds_init(struct iwl_priv *priv)
175 175
176 priv->led_registered = true; 176 priv->led_registered = true;
177} 177}
178EXPORT_SYMBOL(iwl_leds_init);
179 178
180void iwl_leds_exit(struct iwl_priv *priv) 179void iwl_leds_exit(struct iwl_priv *priv)
181{ 180{
@@ -185,4 +184,3 @@ void iwl_leds_exit(struct iwl_priv *priv)
185 led_classdev_unregister(&priv->led); 184 led_classdev_unregister(&priv->led);
186 kfree(priv->led.name); 185 kfree(priv->led.name);
187} 186}
188EXPORT_SYMBOL(iwl_leds_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
deleted file mode 100644
index e1ace3ce30b3..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ /dev/null
@@ -1,657 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-helpers.h"
35#include "iwl-legacy.h"
36
37static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
38{
39 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
40 return;
41
42 if (!ctx->is_active)
43 return;
44
45 ctx->qos_data.def_qos_parm.qos_flags = 0;
46
47 if (ctx->qos_data.qos_active)
48 ctx->qos_data.def_qos_parm.qos_flags |=
49 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
50
51 if (ctx->ht.enabled)
52 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
53
54 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
55 ctx->qos_data.qos_active,
56 ctx->qos_data.def_qos_parm.qos_flags);
57
58 iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
59 sizeof(struct iwl_qosparam_cmd),
60 &ctx->qos_data.def_qos_parm, NULL);
61}
62
63/**
64 * iwl_legacy_mac_config - mac80211 config callback
65 */
66int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
67{
68 struct iwl_priv *priv = hw->priv;
69 const struct iwl_channel_info *ch_info;
70 struct ieee80211_conf *conf = &hw->conf;
71 struct ieee80211_channel *channel = conf->channel;
72 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
73 struct iwl_rxon_context *ctx;
74 unsigned long flags = 0;
75 int ret = 0;
76 u16 ch;
77 int scan_active = 0;
78 bool ht_changed[NUM_IWL_RXON_CTX] = {};
79
80 if (WARN_ON(!priv->cfg->ops->legacy))
81 return -EOPNOTSUPP;
82
83 mutex_lock(&priv->mutex);
84
85 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
86 channel->hw_value, changed);
87
88 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
89 scan_active = 1;
90 IWL_DEBUG_MAC80211(priv, "scan active\n");
91 }
92
93 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
94 IEEE80211_CONF_CHANGE_CHANNEL)) {
95 /* mac80211 uses static for non-HT which is what we want */
96 priv->current_ht_config.smps = conf->smps_mode;
97
98 /*
99 * Recalculate chain counts.
100 *
101 * If monitor mode is enabled then mac80211 will
102 * set up the SM PS mode to OFF if an HT channel is
103 * configured.
104 */
105 if (priv->cfg->ops->hcmd->set_rxon_chain)
106 for_each_context(priv, ctx)
107 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
108 }
109
110 /* during scanning mac80211 will delay channel setting until
111 * scan finish with changed = 0
112 */
113 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
114 if (scan_active)
115 goto set_ch_out;
116
117 ch = channel->hw_value;
118 ch_info = iwl_get_channel_info(priv, channel->band, ch);
119 if (!is_channel_valid(ch_info)) {
120 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
121 ret = -EINVAL;
122 goto set_ch_out;
123 }
124
125 spin_lock_irqsave(&priv->lock, flags);
126
127 for_each_context(priv, ctx) {
128 /* Configure HT40 channels */
129 if (ctx->ht.enabled != conf_is_ht(conf)) {
130 ctx->ht.enabled = conf_is_ht(conf);
131 ht_changed[ctx->ctxid] = true;
132 }
133 if (ctx->ht.enabled) {
134 if (conf_is_ht40_minus(conf)) {
135 ctx->ht.extension_chan_offset =
136 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
137 ctx->ht.is_40mhz = true;
138 } else if (conf_is_ht40_plus(conf)) {
139 ctx->ht.extension_chan_offset =
140 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
141 ctx->ht.is_40mhz = true;
142 } else {
143 ctx->ht.extension_chan_offset =
144 IEEE80211_HT_PARAM_CHA_SEC_NONE;
145 ctx->ht.is_40mhz = false;
146 }
147 } else
148 ctx->ht.is_40mhz = false;
149
150 /*
151 * Default to no protection. Protection mode will
152 * later be set from BSS config in iwl_ht_conf
153 */
154 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
155
156 /* if we are switching from ht to 2.4 clear flags
157 * from any ht related info since 2.4 does not
158 * support ht */
159 if ((le16_to_cpu(ctx->staging.channel) != ch))
160 ctx->staging.flags = 0;
161
162 iwl_set_rxon_channel(priv, channel, ctx);
163 iwl_set_rxon_ht(priv, ht_conf);
164
165 iwl_set_flags_for_band(priv, ctx, channel->band,
166 ctx->vif);
167 }
168
169 spin_unlock_irqrestore(&priv->lock, flags);
170
171 if (priv->cfg->ops->legacy->update_bcast_stations)
172 ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
173
174 set_ch_out:
175 /* The list of supported rates and rate mask can be different
176 * for each band; since the band may have changed, reset
177 * the rate mask to what mac80211 lists */
178 iwl_set_rate(priv);
179 }
180
181 if (changed & (IEEE80211_CONF_CHANGE_PS |
182 IEEE80211_CONF_CHANGE_IDLE)) {
183 ret = iwl_power_update_mode(priv, false);
184 if (ret)
185 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
186 }
187
188 if (changed & IEEE80211_CONF_CHANGE_POWER) {
189 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
190 priv->tx_power_user_lmt, conf->power_level);
191
192 iwl_set_tx_power(priv, conf->power_level, false);
193 }
194
195 if (!iwl_is_ready(priv)) {
196 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
197 goto out;
198 }
199
200 if (scan_active)
201 goto out;
202
203 for_each_context(priv, ctx) {
204 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
205 iwlcore_commit_rxon(priv, ctx);
206 else
207 IWL_DEBUG_INFO(priv,
208 "Not re-sending same RXON configuration.\n");
209 if (ht_changed[ctx->ctxid])
210 iwl_update_qos(priv, ctx);
211 }
212
213out:
214 IWL_DEBUG_MAC80211(priv, "leave\n");
215 mutex_unlock(&priv->mutex);
216 return ret;
217}
218EXPORT_SYMBOL(iwl_legacy_mac_config);
219
220void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
221{
222 struct iwl_priv *priv = hw->priv;
223 unsigned long flags;
224 /* IBSS can only be the IWL_RXON_CTX_BSS context */
225 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
226
227 if (WARN_ON(!priv->cfg->ops->legacy))
228 return;
229
230 mutex_lock(&priv->mutex);
231 IWL_DEBUG_MAC80211(priv, "enter\n");
232
233 spin_lock_irqsave(&priv->lock, flags);
234 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
235 spin_unlock_irqrestore(&priv->lock, flags);
236
237 spin_lock_irqsave(&priv->lock, flags);
238
239 /* new association get rid of ibss beacon skb */
240 if (priv->beacon_skb)
241 dev_kfree_skb(priv->beacon_skb);
242
243 priv->beacon_skb = NULL;
244
245 priv->timestamp = 0;
246
247 spin_unlock_irqrestore(&priv->lock, flags);
248
249 iwl_scan_cancel_timeout(priv, 100);
250 if (!iwl_is_ready_rf(priv)) {
251 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
252 mutex_unlock(&priv->mutex);
253 return;
254 }
255
256 /* we are restarting association process
257 * clear RXON_FILTER_ASSOC_MSK bit
258 */
259 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
260 iwlcore_commit_rxon(priv, ctx);
261
262 iwl_set_rate(priv);
263
264 mutex_unlock(&priv->mutex);
265
266 IWL_DEBUG_MAC80211(priv, "leave\n");
267}
268EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
269
270static void iwl_ht_conf(struct iwl_priv *priv,
271 struct ieee80211_vif *vif)
272{
273 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
274 struct ieee80211_sta *sta;
275 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
276 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
277
278 IWL_DEBUG_ASSOC(priv, "enter:\n");
279
280 if (!ctx->ht.enabled)
281 return;
282
283 ctx->ht.protection =
284 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
285 ctx->ht.non_gf_sta_present =
286 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
287
288 ht_conf->single_chain_sufficient = false;
289
290 switch (vif->type) {
291 case NL80211_IFTYPE_STATION:
292 rcu_read_lock();
293 sta = ieee80211_find_sta(vif, bss_conf->bssid);
294 if (sta) {
295 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
296 int maxstreams;
297
298 maxstreams = (ht_cap->mcs.tx_params &
299 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
300 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
301 maxstreams += 1;
302
303 if ((ht_cap->mcs.rx_mask[1] == 0) &&
304 (ht_cap->mcs.rx_mask[2] == 0))
305 ht_conf->single_chain_sufficient = true;
306 if (maxstreams <= 1)
307 ht_conf->single_chain_sufficient = true;
308 } else {
309 /*
310 * If at all, this can only happen through a race
311 * when the AP disconnects us while we're still
312 * setting up the connection, in that case mac80211
313 * will soon tell us about that.
314 */
315 ht_conf->single_chain_sufficient = true;
316 }
317 rcu_read_unlock();
318 break;
319 case NL80211_IFTYPE_ADHOC:
320 ht_conf->single_chain_sufficient = true;
321 break;
322 default:
323 break;
324 }
325
326 IWL_DEBUG_ASSOC(priv, "leave\n");
327}
328
329static inline void iwl_set_no_assoc(struct iwl_priv *priv,
330 struct ieee80211_vif *vif)
331{
332 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
333
334 /*
335 * inform the ucode that there is no longer an
336 * association and that no more packets should be
337 * sent
338 */
339 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
340 ctx->staging.assoc_id = 0;
341 iwlcore_commit_rxon(priv, ctx);
342}
343
344static void iwlcore_beacon_update(struct ieee80211_hw *hw,
345 struct ieee80211_vif *vif)
346{
347 struct iwl_priv *priv = hw->priv;
348 unsigned long flags;
349 __le64 timestamp;
350 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
351
352 if (!skb)
353 return;
354
355 IWL_DEBUG_MAC80211(priv, "enter\n");
356
357 lockdep_assert_held(&priv->mutex);
358
359 if (!priv->beacon_ctx) {
360 IWL_ERR(priv, "update beacon but no beacon context!\n");
361 dev_kfree_skb(skb);
362 return;
363 }
364
365 spin_lock_irqsave(&priv->lock, flags);
366
367 if (priv->beacon_skb)
368 dev_kfree_skb(priv->beacon_skb);
369
370 priv->beacon_skb = skb;
371
372 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
373 priv->timestamp = le64_to_cpu(timestamp);
374
375 IWL_DEBUG_MAC80211(priv, "leave\n");
376 spin_unlock_irqrestore(&priv->lock, flags);
377
378 if (!iwl_is_ready_rf(priv)) {
379 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
380 return;
381 }
382
383 priv->cfg->ops->legacy->post_associate(priv);
384}
385
386void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
387 struct ieee80211_vif *vif,
388 struct ieee80211_bss_conf *bss_conf,
389 u32 changes)
390{
391 struct iwl_priv *priv = hw->priv;
392 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
393 int ret;
394
395 if (WARN_ON(!priv->cfg->ops->legacy))
396 return;
397
398 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
399
400 if (!iwl_is_alive(priv))
401 return;
402
403 mutex_lock(&priv->mutex);
404
405 if (changes & BSS_CHANGED_QOS) {
406 unsigned long flags;
407
408 spin_lock_irqsave(&priv->lock, flags);
409 ctx->qos_data.qos_active = bss_conf->qos;
410 iwl_update_qos(priv, ctx);
411 spin_unlock_irqrestore(&priv->lock, flags);
412 }
413
414 if (changes & BSS_CHANGED_BEACON_ENABLED) {
415 /*
416 * the add_interface code must make sure we only ever
417 * have a single interface that could be beaconing at
418 * any time.
419 */
420 if (vif->bss_conf.enable_beacon)
421 priv->beacon_ctx = ctx;
422 else
423 priv->beacon_ctx = NULL;
424 }
425
426 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
427 dev_kfree_skb(priv->beacon_skb);
428 priv->beacon_skb = ieee80211_beacon_get(hw, vif);
429 }
430
431 if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
432 iwl_send_rxon_timing(priv, ctx);
433
434 if (changes & BSS_CHANGED_BSSID) {
435 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
436
437 /*
438 * If there is currently a HW scan going on in the
439		 * background then we need to cancel it; otherwise the RXON
440		 * below/in post_associate will fail.
441 */
442 if (iwl_scan_cancel_timeout(priv, 100)) {
443 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
444 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
445 mutex_unlock(&priv->mutex);
446 return;
447 }
448
449 /* mac80211 only sets assoc when in STATION mode */
450 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
451 memcpy(ctx->staging.bssid_addr,
452 bss_conf->bssid, ETH_ALEN);
453
454 /* currently needed in a few places */
455 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
456 } else {
457 ctx->staging.filter_flags &=
458 ~RXON_FILTER_ASSOC_MSK;
459 }
460
461 }
462
463 /*
464 * This needs to be after setting the BSSID in case
465 * mac80211 decides to do both changes at once because
466 * it will invoke post_associate.
467 */
468 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
469 iwlcore_beacon_update(hw, vif);
470
471 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
472 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
473 bss_conf->use_short_preamble);
474 if (bss_conf->use_short_preamble)
475 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
476 else
477 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
478 }
479
480 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
481 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
482 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
483 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
484 else
485 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
486 if (bss_conf->use_cts_prot)
487 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
488 else
489 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
490 }
491
492 if (changes & BSS_CHANGED_BASIC_RATES) {
493 /* XXX use this information
494 *
495 * To do that, remove code from iwl_set_rate() and put something
496 * like this here:
497 *
498 if (A-band)
499 ctx->staging.ofdm_basic_rates =
500 bss_conf->basic_rates;
501 else
502 ctx->staging.ofdm_basic_rates =
503 bss_conf->basic_rates >> 4;
504 ctx->staging.cck_basic_rates =
505 bss_conf->basic_rates & 0xF;
506 */
507 }
508
509 if (changes & BSS_CHANGED_HT) {
510 iwl_ht_conf(priv, vif);
511
512 if (priv->cfg->ops->hcmd->set_rxon_chain)
513 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
514 }
515
516 if (changes & BSS_CHANGED_ASSOC) {
517 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
518 if (bss_conf->assoc) {
519 priv->timestamp = bss_conf->timestamp;
520
521 if (!iwl_is_rfkill(priv))
522 priv->cfg->ops->legacy->post_associate(priv);
523 } else
524 iwl_set_no_assoc(priv, vif);
525 }
526
527 if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
528 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
529 changes);
530 ret = iwl_send_rxon_assoc(priv, ctx);
531 if (!ret) {
532 /* Sync active_rxon with latest change. */
533 memcpy((void *)&ctx->active,
534 &ctx->staging,
535 sizeof(struct iwl_rxon_cmd));
536 }
537 }
538
539 if (changes & BSS_CHANGED_BEACON_ENABLED) {
540 if (vif->bss_conf.enable_beacon) {
541 memcpy(ctx->staging.bssid_addr,
542 bss_conf->bssid, ETH_ALEN);
543 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
544 priv->cfg->ops->legacy->config_ap(priv);
545 } else
546 iwl_set_no_assoc(priv, vif);
547 }
548
549 if (changes & BSS_CHANGED_IBSS) {
550 ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
551 bss_conf->ibss_joined);
552 if (ret)
553 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
554 bss_conf->ibss_joined ? "add" : "remove",
555 bss_conf->bssid);
556 }
557
558 mutex_unlock(&priv->mutex);
559
560 IWL_DEBUG_MAC80211(priv, "leave\n");
561}
562EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
563
564irqreturn_t iwl_isr_legacy(int irq, void *data)
565{
566 struct iwl_priv *priv = data;
567 u32 inta, inta_mask;
568 u32 inta_fh;
569 unsigned long flags;
570 if (!priv)
571 return IRQ_NONE;
572
573 spin_lock_irqsave(&priv->lock, flags);
574
575 /* Disable (but don't clear!) interrupts here to avoid
576 * back-to-back ISRs and sporadic interrupts from our NIC.
577 * If we have something to service, the tasklet will re-enable ints.
578 * If we *don't* have something, we'll re-enable before leaving here. */
579 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
580 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
581
582 /* Discover which interrupts are active/pending */
583 inta = iwl_read32(priv, CSR_INT);
584 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
585
586 /* Ignore interrupt if there's nothing in NIC to service.
587 * This may be due to IRQ shared with another device,
588 * or due to sporadic interrupts thrown from our NIC. */
589 if (!inta && !inta_fh) {
590 IWL_DEBUG_ISR(priv,
591 "Ignore interrupt, inta == 0, inta_fh == 0\n");
592 goto none;
593 }
594
595 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
596 /* Hardware disappeared. It might have already raised
597 * an interrupt */
598 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
599 goto unplugged;
600 }
601
602 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
603 inta, inta_mask, inta_fh);
604
605 inta &= ~CSR_INT_BIT_SCD;
606
607 /* iwl_irq_tasklet() will service interrupts and re-enable them */
608 if (likely(inta || inta_fh))
609 tasklet_schedule(&priv->irq_tasklet);
610
611unplugged:
612 spin_unlock_irqrestore(&priv->lock, flags);
613 return IRQ_HANDLED;
614
615none:
616 /* re-enable interrupts here since we don't have anything to service. */
617	/* only re-enable if they were disabled by this irq handler */
618 if (test_bit(STATUS_INT_ENABLED, &priv->status))
619 iwl_enable_interrupts(priv);
620 spin_unlock_irqrestore(&priv->lock, flags);
621 return IRQ_NONE;
622}
623EXPORT_SYMBOL(iwl_isr_legacy);
624
625/*
626 * iwl_legacy_tx_cmd_protection: Set RTS/CTS. This function is shared
627 * only by the 3945 and 4965.
628 */
629void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
630 struct ieee80211_tx_info *info,
631 __le16 fc, __le32 *tx_flags)
632{
633 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
634 *tx_flags |= TX_CMD_FLG_RTS_MSK;
635 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
636 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
637
638 if (!ieee80211_is_mgmt(fc))
639 return;
640
641 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
642 case cpu_to_le16(IEEE80211_STYPE_AUTH):
643 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
644 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
645 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
646 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
647 *tx_flags |= TX_CMD_FLG_CTS_MSK;
648 break;
649 }
650 } else if (info->control.rates[0].flags &
651 IEEE80211_TX_RC_USE_CTS_PROTECT) {
652 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
653 *tx_flags |= TX_CMD_FLG_CTS_MSK;
654 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
655 }
656}
657EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 1d1bf3234d8d..576795e2c75b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -425,7 +425,6 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
 
 	return ret;
 }
-EXPORT_SYMBOL(iwl_power_set_mode);
 
 int iwl_power_update_mode(struct iwl_priv *priv, bool force)
 {
@@ -434,7 +433,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
 	iwl_power_build_cmd(priv, &cmd);
 	return iwl_power_set_mode(priv, &cmd, force);
 }
-EXPORT_SYMBOL(iwl_power_update_mode);
 
 /* initialize to default */
 void iwl_power_initialize(struct iwl_priv *priv)
@@ -448,4 +446,3 @@ void iwl_power_initialize(struct iwl_priv *priv)
 	memset(&priv->power_data.sleep_cmd, 0,
 			sizeof(priv->power_data.sleep_cmd));
 }
-EXPORT_SYMBOL(iwl_power_initialize);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index bc89393fb696..a21f6fe10fb7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -118,7 +118,6 @@ int iwl_rx_queue_space(const struct iwl_rx_queue *q)
 		s = 0;
 	return s;
 }
-EXPORT_SYMBOL(iwl_rx_queue_space);
 
 /**
  * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
@@ -170,7 +169,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
 exit_unlock:
 	spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
 
 int iwl_rx_queue_alloc(struct iwl_priv *priv)
 {
@@ -211,7 +209,6 @@ err_rb:
 err_bd:
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(iwl_rx_queue_alloc);
 
 
 void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
@@ -229,7 +226,6 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
 	memcpy(&priv->measure_report, report, sizeof(*report));
 	priv->measurement_status |= MEASUREMENT_READY;
 }
-EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
 
 void iwl_recover_from_statistics(struct iwl_priv *priv,
 				struct iwl_rx_packet *pkt)
@@ -249,7 +245,6 @@ void iwl_recover_from_statistics(struct iwl_priv *priv,
 	    !priv->cfg->ops->lib->check_plcp_health(priv, pkt))
 		iwl_force_reset(priv, IWL_RF_RESET, false);
 }
-EXPORT_SYMBOL(iwl_recover_from_statistics);
 
 /*
  * returns non-zero if packet should be dropped
@@ -302,4 +297,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(iwl_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 08f1bea8b652..faa6d34cb658 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -155,7 +155,6 @@ int iwl_scan_cancel(struct iwl_priv *priv)
 		queue_work(priv->workqueue, &priv->abort_scan);
 	return 0;
 }
-EXPORT_SYMBOL(iwl_scan_cancel);
 
 /**
  * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
@@ -180,7 +179,6 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
 
 	return test_bit(STATUS_SCAN_HW, &priv->status);
 }
-EXPORT_SYMBOL(iwl_scan_cancel_timeout);
 
 /* Service response to REPLY_SCAN_CMD (0x80) */
 static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -288,7 +286,6 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
 	priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
 					iwl_rx_scan_complete_notif;
 }
-EXPORT_SYMBOL(iwl_setup_rx_scan_handlers);
 
 inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
 				     enum ieee80211_band band,
@@ -301,7 +298,6 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
 		return IWL_ACTIVE_DWELL_TIME_24 +
 			IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
 }
-EXPORT_SYMBOL(iwl_get_active_dwell_time);
 
 u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
 			       enum ieee80211_band band,
@@ -333,7 +329,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
 
 	return passive;
 }
-EXPORT_SYMBOL(iwl_get_passive_dwell_time);
 
 void iwl_init_scan_params(struct iwl_priv *priv)
 {
@@ -343,7 +338,6 @@ void iwl_init_scan_params(struct iwl_priv *priv)
 	if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
 		priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
 }
-EXPORT_SYMBOL(iwl_init_scan_params);
 
 static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
 					  struct ieee80211_vif *vif,
@@ -439,7 +433,6 @@ out_unlock:
 
 	return ret;
 }
-EXPORT_SYMBOL(iwl_mac_hw_scan);
 
 /*
  * internal short scan, this function should only been called while associated.
@@ -536,7 +529,6 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
 
 	return (u16)len;
 }
-EXPORT_SYMBOL(iwl_fill_probe_req);
 
 static void iwl_bg_abort_scan(struct work_struct *work)
 {
@@ -621,7 +613,6 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
 	INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
 	INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
 }
-EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
 
 void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
 {
@@ -635,4 +626,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
 		mutex_unlock(&priv->mutex);
 	}
 }
-EXPORT_SYMBOL(iwl_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 49493d176515..bc90a12408a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -169,7 +169,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 
 	return ret;
 }
-EXPORT_SYMBOL(iwl_send_add_sta);
 
 static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
 				   struct ieee80211_sta *sta,
@@ -316,7 +315,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 	return sta_id;
 
 }
-EXPORT_SYMBOL_GPL(iwl_prep_station);
 
 #define STA_WAIT_TIMEOUT (HZ/2)
 
@@ -379,7 +377,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 	*sta_id_r = sta_id;
 	return ret;
 }
-EXPORT_SYMBOL(iwl_add_station_common);
 
 /**
  * iwl_sta_ucode_deactivate - deactivate ucode status for a station
@@ -513,7 +510,6 @@ out_err:
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 	return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(iwl_remove_station);
 
 /**
  * iwl_clear_ucode_stations - clear ucode station table bits
@@ -548,7 +544,6 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
 	if (!cleared)
 		IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
 }
-EXPORT_SYMBOL(iwl_clear_ucode_stations);
 
 /**
  * iwl_restore_stations() - Restore driver known stations to device
@@ -625,7 +620,6 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 	else
 		IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
 }
-EXPORT_SYMBOL(iwl_restore_stations);
 
 void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
@@ -668,7 +662,6 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 			priv->stations[sta_id].sta.sta.addr, ret);
 	iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
 }
-EXPORT_SYMBOL(iwl_reprogram_ap_sta);
 
 int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 {
@@ -680,7 +673,6 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 
 	return WEP_INVALID_OFFSET;
 }
-EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
 
 void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
 {
@@ -700,7 +692,6 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
 	}
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
-EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -810,7 +801,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 	}
 	return ret;
 }
-EXPORT_SYMBOL(iwl_send_lq_cmd);
 
 int iwl_mac_sta_remove(struct ieee80211_hw *hw,
 		       struct ieee80211_vif *vif,
@@ -832,4 +822,3 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
 	mutex_unlock(&priv->mutex);
 	return ret;
 }
-EXPORT_SYMBOL(iwl_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 073b6ce6141c..7e607d39da1c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -84,7 +84,6 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	}
 	txq->need_update = 0;
 }
-EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
@@ -131,7 +130,6 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
 }
-EXPORT_SYMBOL(iwl_tx_queue_free);
 
 /**
  * iwl_cmd_queue_free - Deallocate DMA queue.
@@ -193,7 +191,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
 }
-EXPORT_SYMBOL(iwl_cmd_queue_free);
 
 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
  * DMA services
@@ -233,7 +230,6 @@ int iwl_queue_space(const struct iwl_queue *q)
 		s = 0;
 	return s;
 }
-EXPORT_SYMBOL(iwl_queue_space);
 
 
 /**
@@ -384,7 +380,6 @@ out_free_arrays:
 
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(iwl_tx_queue_init);
 
 void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 			int slots_num, u32 txq_id)
@@ -404,7 +399,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	/* Tell device where to find queue */
 	priv->cfg->ops->lib->txq_init(priv, txq);
 }
-EXPORT_SYMBOL(iwl_tx_queue_reset);
 
 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
 
@@ -641,4 +635,3 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	}
 	meta->flags = 0;
 }
-EXPORT_SYMBOL(iwl_tx_cmd_complete);