author	John W. Linville <linville@tuxdriver.com>	2011-03-04 14:10:40 -0500
committer	John W. Linville <linville@tuxdriver.com>	2011-03-04 14:10:40 -0500
commit	85a7045a90052749885e166f40af5e9140032287 (patch)
tree	cb4702d50bbe1d10ab9320ad3f63323b817727f7 /drivers/net/wireless/iwlegacy
parent	29546a6404e3a4b5d13f0a9586eb5cf1c3b25167 (diff)
parent	e46395a4b3d32d161d8b6d8e4a002972b1faae3e (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem
Diffstat (limited to 'drivers/net/wireless/iwlegacy')
-rw-r--r--	drivers/net/wireless/iwlegacy/Kconfig	116
-rw-r--r--	drivers/net/wireless/iwlegacy/Makefile	25
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c	523
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h	60
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-3945-fh.h	187
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-3945-hw.h	293
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-3945-led.c	64
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-3945-led.h	32
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-3945-rs.c	994
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-3945.c	2742
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-3945.h	308
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-calib.c	967
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-calib.h	75
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c	774
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h	59
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c	154
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-hw.h	814
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-led.c	74
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-led.h	33
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-lib.c	1260
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-rs.c	2870
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-rx.c	291
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-sta.c	721
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-tx.c	1369
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965-ucode.c	166
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965.c	2188
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-4965.h	282
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-commands.h	3405
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-core.c	2674
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-core.h	646
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-csr.h	422
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-debug.h	198
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-debugfs.c	1467
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-dev.h	1426
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-devtrace.c	45
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-devtrace.h	270
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-eeprom.c	561
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-eeprom.h	344
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-fh.h	513
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-hcmd.c	271
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-helpers.h	181
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-io.h	545
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-led.c	188
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-led.h	56
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-legacy-rs.h	456
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-power.c	165
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-power.h	55
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-prph.h	523
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-rx.c	302
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-scan.c	625
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-spectrum.h	92
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-sta.c	816
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-sta.h	148
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl-tx.c	660
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl3945-base.c	4293
-rw-r--r--	drivers/net/wireless/iwlegacy/iwl4965-base.c	3632
56 files changed, 42420 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
new file mode 100644
index 000000000000..2a45dd44cc12
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -0,0 +1,116 @@
1config IWLWIFI_LEGACY
2 tristate "Intel Wireless Wifi legacy devices"
3 depends on PCI && MAC80211
4 select FW_LOADER
5 select NEW_LEDS
6 select LEDS_CLASS
7 select LEDS_TRIGGERS
8 select MAC80211_LEDS
9
10menu "Debugging Options"
11 depends on IWLWIFI_LEGACY
12
13config IWLWIFI_LEGACY_DEBUG
14 bool "Enable full debugging output in 4965 and 3945 drivers"
15 depends on IWLWIFI_LEGACY
16 ---help---
17 This option will enable debug tracing output for the iwlwifilegacy
18 drivers.
19
20 This will result in the kernel module being ~100k larger. You can
21 control which debug output is sent to the kernel log by setting the
22 value in
23
24 /sys/class/net/wlan0/device/debug_level
25
26 This entry will only exist if this option is enabled.
27
28 To set a value, simply echo an 8-byte hex value to the same file:
29
30 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
31
32 You can find the list of debug mask values in:
33	  drivers/net/wireless/iwlegacy/iwl-debug.h
34
35 If this is your first time using this driver, you should say Y here
36 as the debug information can assist others in helping you resolve
37 any problems you may encounter.
38
39config IWLWIFI_LEGACY_DEBUGFS
40 bool "4965 and 3945 debugfs support"
41 depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
42 ---help---
43 Enable creation of debugfs files for the iwlwifilegacy drivers. This
44 is a low-impact option that allows getting insight into the
45 driver's state at runtime.
46
47config IWLWIFI_LEGACY_DEVICE_TRACING
48 bool "iwlwifilegacy legacy device access tracing"
49 depends on IWLWIFI_LEGACY
50 depends on EVENT_TRACING
51 help
52 Say Y here to trace all commands, including TX frames and IO
53 accesses, sent to the device. If you say yes, iwlwifilegacy will
54 register with the ftrace framework for event tracing and dump
55	  all this information to the ringbuffer; you may need to
56 increase the ringbuffer size. See the ftrace documentation
57 for more information.
58
59 When tracing is not enabled, this option still has some
60 (though rather small) overhead.
61
62 If unsure, say Y so we can help you better when problems
63 occur.
64endmenu
65
66config IWL4965
67 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
68 depends on IWLWIFI_LEGACY
69 ---help---
70	  This option enables support for the 4965AGN device.
71
72 Select to build the driver supporting the:
73
74 Intel Wireless WiFi Link 4965AGN
75
76 This driver uses the kernel's mac80211 subsystem.
77
78 In order to use this driver, you will need a microcode (uCode)
79 image for it. You can obtain the microcode from:
80
81 <http://intellinuxwireless.org/>.
82
83 The microcode is typically installed in /lib/firmware. You can
84 look in the hotplug script /etc/hotplug/firmware.agent to
85 determine which directory FIRMWARE_DIR is set to when the script
86 runs.
87
88 If you want to compile the driver as a module ( = code which can be
89 inserted in and removed from the running kernel whenever you want),
90 say M here and read <file:Documentation/kbuild/modules.txt>. The
91 module will be called iwl4965.
92
93config IWL3945
94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
95 depends on IWLWIFI_LEGACY
96 ---help---
97 Select to build the driver supporting the:
98
99 Intel PRO/Wireless 3945ABG/BG Network Connection
100
101 This driver uses the kernel's mac80211 subsystem.
102
103 In order to use this driver, you will need a microcode (uCode)
104 image for it. You can obtain the microcode from:
105
106 <http://intellinuxwireless.org/>.
107
108 The microcode is typically installed in /lib/firmware. You can
109 look in the hotplug script /etc/hotplug/firmware.agent to
110 determine which directory FIRMWARE_DIR is set to when the script
111 runs.
112
113 If you want to compile the driver as a module ( = code which can be
114 inserted in and removed from the running kernel whenever you want),
115 say M here and read <file:Documentation/kbuild/modules.txt>. The
116 module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
new file mode 100644
index 000000000000..d56aeb38c211
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -0,0 +1,25 @@
1obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
2iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
4iwl-legacy-objs += iwl-scan.o iwl-led.o
5iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
6iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
7
8iwl-legacy-objs += $(iwl-legacy-m)
9
10CFLAGS_iwl-devtrace.o := -I$(src)
11
12# 4965
13obj-$(CONFIG_IWL4965) += iwl4965.o
14iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
15iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
16iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
17iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
18iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
19
20# 3945
21obj-$(CONFIG_IWL3945) += iwl3945.o
22iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
23iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
24
25ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
new file mode 100644
index 000000000000..cfabb38793ab
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
@@ -0,0 +1,523 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-3945-debugfs.h"
30
31
32static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
33{
34 int p = 0;
35
36 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
37 le32_to_cpu(priv->_3945.statistics.flag));
38 if (le32_to_cpu(priv->_3945.statistics.flag) &
39 UCODE_STATISTICS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(priv->_3945.statistics.flag) &
44 UCODE_STATISTICS_FREQUENCY_MSK)
45 ? "2.4 GHz" : "5.2 GHz");
46 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
47 (le32_to_cpu(priv->_3945.statistics.flag) &
48 UCODE_STATISTICS_NARROW_BAND_MSK)
49 ? "enabled" : "disabled");
50 return p;
51}
52
53ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
54 char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 struct iwl_priv *priv = file->private_data;
58 int pos = 0;
59 char *buf;
60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
62 ssize_t ret;
63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
64 *max_ofdm;
65 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
66 struct iwl39_statistics_rx_non_phy *general, *accum_general;
67 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
68
69 if (!iwl_legacy_is_alive(priv))
70 return -EAGAIN;
71
72 buf = kzalloc(bufsz, GFP_KERNEL);
73 if (!buf) {
74 IWL_ERR(priv, "Can not allocate Buffer\n");
75 return -ENOMEM;
76 }
77
78 /*
79	 * The statistics displayed here are based on the last
80	 * statistics notification from uCode and might not
81	 * reflect the current uCode activity.
82 */
83 ofdm = &priv->_3945.statistics.rx.ofdm;
84 cck = &priv->_3945.statistics.rx.cck;
85 general = &priv->_3945.statistics.rx.general;
86 accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
87 accum_cck = &priv->_3945.accum_statistics.rx.cck;
88 accum_general = &priv->_3945.accum_statistics.rx.general;
89 delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
90 delta_cck = &priv->_3945.delta_statistics.rx.cck;
91 delta_general = &priv->_3945.delta_statistics.rx.general;
92 max_ofdm = &priv->_3945.max_delta.rx.ofdm;
93 max_cck = &priv->_3945.max_delta.rx.cck;
94 max_general = &priv->_3945.max_delta.rx.general;
95
96 pos += iwl3945_statistics_flag(priv, buf, bufsz);
97 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
98 "acumulative delta max\n",
99 "Statistics_Rx - OFDM:");
100 pos += scnprintf(buf + pos, bufsz - pos,
101 " %-30s %10u %10u %10u %10u\n",
102 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
103 accum_ofdm->ina_cnt,
104 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 " %-30s %10u %10u %10u %10u\n",
107 "fina_cnt:",
108 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
109 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
110 pos += scnprintf(buf + pos, bufsz - pos,
111 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
112 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
113 delta_ofdm->plcp_err, max_ofdm->plcp_err);
114 pos += scnprintf(buf + pos, bufsz - pos,
115 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
116 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
117 delta_ofdm->crc32_err, max_ofdm->crc32_err);
118 pos += scnprintf(buf + pos, bufsz - pos,
119 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
120 le32_to_cpu(ofdm->overrun_err),
121 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
122 max_ofdm->overrun_err);
123 pos += scnprintf(buf + pos, bufsz - pos,
124 " %-30s %10u %10u %10u %10u\n",
125 "early_overrun_err:",
126 le32_to_cpu(ofdm->early_overrun_err),
127 accum_ofdm->early_overrun_err,
128 delta_ofdm->early_overrun_err,
129 max_ofdm->early_overrun_err);
130 pos += scnprintf(buf + pos, bufsz - pos,
131 " %-30s %10u %10u %10u %10u\n",
132 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
133 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
134 max_ofdm->crc32_good);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
137 le32_to_cpu(ofdm->false_alarm_cnt),
138 accum_ofdm->false_alarm_cnt,
139 delta_ofdm->false_alarm_cnt,
140 max_ofdm->false_alarm_cnt);
141 pos += scnprintf(buf + pos, bufsz - pos,
142 " %-30s %10u %10u %10u %10u\n",
143 "fina_sync_err_cnt:",
144 le32_to_cpu(ofdm->fina_sync_err_cnt),
145 accum_ofdm->fina_sync_err_cnt,
146 delta_ofdm->fina_sync_err_cnt,
147 max_ofdm->fina_sync_err_cnt);
148 pos += scnprintf(buf + pos, bufsz - pos,
149 " %-30s %10u %10u %10u %10u\n",
150 "sfd_timeout:",
151 le32_to_cpu(ofdm->sfd_timeout),
152 accum_ofdm->sfd_timeout,
153 delta_ofdm->sfd_timeout,
154 max_ofdm->sfd_timeout);
155 pos += scnprintf(buf + pos, bufsz - pos,
156 " %-30s %10u %10u %10u %10u\n",
157 "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout),
159 accum_ofdm->fina_timeout,
160 delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 " %-30s %10u %10u %10u %10u\n",
164 "unresponded_rts:",
165 le32_to_cpu(ofdm->unresponded_rts),
166 accum_ofdm->unresponded_rts,
167 delta_ofdm->unresponded_rts,
168 max_ofdm->unresponded_rts);
169 pos += scnprintf(buf + pos, bufsz - pos,
170 " %-30s %10u %10u %10u %10u\n",
171 "rxe_frame_lmt_ovrun:",
172 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
173 accum_ofdm->rxe_frame_limit_overrun,
174 delta_ofdm->rxe_frame_limit_overrun,
175 max_ofdm->rxe_frame_limit_overrun);
176 pos += scnprintf(buf + pos, bufsz - pos,
177 " %-30s %10u %10u %10u %10u\n",
178 "sent_ack_cnt:",
179 le32_to_cpu(ofdm->sent_ack_cnt),
180 accum_ofdm->sent_ack_cnt,
181 delta_ofdm->sent_ack_cnt,
182 max_ofdm->sent_ack_cnt);
183 pos += scnprintf(buf + pos, bufsz - pos,
184 " %-30s %10u %10u %10u %10u\n",
185 "sent_cts_cnt:",
186 le32_to_cpu(ofdm->sent_cts_cnt),
187 accum_ofdm->sent_cts_cnt,
188 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
189
190 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
191 "acumulative delta max\n",
192 "Statistics_Rx - CCK:");
193 pos += scnprintf(buf + pos, bufsz - pos,
194 " %-30s %10u %10u %10u %10u\n",
195 "ina_cnt:",
196 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
197 delta_cck->ina_cnt, max_cck->ina_cnt);
198 pos += scnprintf(buf + pos, bufsz - pos,
199 " %-30s %10u %10u %10u %10u\n",
200 "fina_cnt:",
201 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
202 delta_cck->fina_cnt, max_cck->fina_cnt);
203 pos += scnprintf(buf + pos, bufsz - pos,
204 " %-30s %10u %10u %10u %10u\n",
205 "plcp_err:",
206 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
207 delta_cck->plcp_err, max_cck->plcp_err);
208 pos += scnprintf(buf + pos, bufsz - pos,
209 " %-30s %10u %10u %10u %10u\n",
210 "crc32_err:",
211 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
212 delta_cck->crc32_err, max_cck->crc32_err);
213 pos += scnprintf(buf + pos, bufsz - pos,
214 " %-30s %10u %10u %10u %10u\n",
215 "overrun_err:",
216 le32_to_cpu(cck->overrun_err),
217 accum_cck->overrun_err,
218 delta_cck->overrun_err, max_cck->overrun_err);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 " %-30s %10u %10u %10u %10u\n",
221 "early_overrun_err:",
222 le32_to_cpu(cck->early_overrun_err),
223 accum_cck->early_overrun_err,
224 delta_cck->early_overrun_err,
225 max_cck->early_overrun_err);
226 pos += scnprintf(buf + pos, bufsz - pos,
227 " %-30s %10u %10u %10u %10u\n",
228 "crc32_good:",
229 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
230 delta_cck->crc32_good,
231 max_cck->crc32_good);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 " %-30s %10u %10u %10u %10u\n",
234 "false_alarm_cnt:",
235 le32_to_cpu(cck->false_alarm_cnt),
236 accum_cck->false_alarm_cnt,
237 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 " %-30s %10u %10u %10u %10u\n",
240 "fina_sync_err_cnt:",
241 le32_to_cpu(cck->fina_sync_err_cnt),
242 accum_cck->fina_sync_err_cnt,
243 delta_cck->fina_sync_err_cnt,
244 max_cck->fina_sync_err_cnt);
245 pos += scnprintf(buf + pos, bufsz - pos,
246 " %-30s %10u %10u %10u %10u\n",
247 "sfd_timeout:",
248 le32_to_cpu(cck->sfd_timeout),
249 accum_cck->sfd_timeout,
250 delta_cck->sfd_timeout, max_cck->sfd_timeout);
251 pos += scnprintf(buf + pos, bufsz - pos,
252 " %-30s %10u %10u %10u %10u\n",
253 "fina_timeout:",
254 le32_to_cpu(cck->fina_timeout),
255 accum_cck->fina_timeout,
256 delta_cck->fina_timeout, max_cck->fina_timeout);
257 pos += scnprintf(buf + pos, bufsz - pos,
258 " %-30s %10u %10u %10u %10u\n",
259 "unresponded_rts:",
260 le32_to_cpu(cck->unresponded_rts),
261 accum_cck->unresponded_rts,
262 delta_cck->unresponded_rts,
263 max_cck->unresponded_rts);
264 pos += scnprintf(buf + pos, bufsz - pos,
265 " %-30s %10u %10u %10u %10u\n",
266 "rxe_frame_lmt_ovrun:",
267 le32_to_cpu(cck->rxe_frame_limit_overrun),
268 accum_cck->rxe_frame_limit_overrun,
269 delta_cck->rxe_frame_limit_overrun,
270 max_cck->rxe_frame_limit_overrun);
271 pos += scnprintf(buf + pos, bufsz - pos,
272 " %-30s %10u %10u %10u %10u\n",
273 "sent_ack_cnt:",
274 le32_to_cpu(cck->sent_ack_cnt),
275 accum_cck->sent_ack_cnt,
276 delta_cck->sent_ack_cnt,
277 max_cck->sent_ack_cnt);
278 pos += scnprintf(buf + pos, bufsz - pos,
279 " %-30s %10u %10u %10u %10u\n",
280 "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt,
283 delta_cck->sent_cts_cnt,
284 max_cck->sent_cts_cnt);
285
286 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
287 "acumulative delta max\n",
288 "Statistics_Rx - GENERAL:");
289 pos += scnprintf(buf + pos, bufsz - pos,
290 " %-30s %10u %10u %10u %10u\n",
291 "bogus_cts:",
292 le32_to_cpu(general->bogus_cts),
293 accum_general->bogus_cts,
294 delta_general->bogus_cts, max_general->bogus_cts);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 " %-30s %10u %10u %10u %10u\n",
297 "bogus_ack:",
298 le32_to_cpu(general->bogus_ack),
299 accum_general->bogus_ack,
300 delta_general->bogus_ack, max_general->bogus_ack);
301 pos += scnprintf(buf + pos, bufsz - pos,
302 " %-30s %10u %10u %10u %10u\n",
303 "non_bssid_frames:",
304 le32_to_cpu(general->non_bssid_frames),
305 accum_general->non_bssid_frames,
306 delta_general->non_bssid_frames,
307 max_general->non_bssid_frames);
308 pos += scnprintf(buf + pos, bufsz - pos,
309 " %-30s %10u %10u %10u %10u\n",
310 "filtered_frames:",
311 le32_to_cpu(general->filtered_frames),
312 accum_general->filtered_frames,
313 delta_general->filtered_frames,
314 max_general->filtered_frames);
315 pos += scnprintf(buf + pos, bufsz - pos,
316 " %-30s %10u %10u %10u %10u\n",
317 "non_channel_beacons:",
318 le32_to_cpu(general->non_channel_beacons),
319 accum_general->non_channel_beacons,
320 delta_general->non_channel_beacons,
321 max_general->non_channel_beacons);
322
323 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
324 kfree(buf);
325 return ret;
326}
327
328ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
329 char __user *user_buf,
330 size_t count, loff_t *ppos)
331{
332 struct iwl_priv *priv = file->private_data;
333 int pos = 0;
334 char *buf;
335 int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
336 ssize_t ret;
337 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
338
339 if (!iwl_legacy_is_alive(priv))
340 return -EAGAIN;
341
342 buf = kzalloc(bufsz, GFP_KERNEL);
343 if (!buf) {
344 IWL_ERR(priv, "Can not allocate Buffer\n");
345 return -ENOMEM;
346 }
347
348 /*
349	 * The statistics displayed here are based on the last
350	 * statistics notification from uCode and might not
351	 * reflect the current uCode activity.
352 */
353 tx = &priv->_3945.statistics.tx;
354 accum_tx = &priv->_3945.accum_statistics.tx;
355 delta_tx = &priv->_3945.delta_statistics.tx;
356 max_tx = &priv->_3945.max_delta.tx;
357 pos += iwl3945_statistics_flag(priv, buf, bufsz);
358 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
359 "acumulative delta max\n",
360 "Statistics_Tx:");
361 pos += scnprintf(buf + pos, bufsz - pos,
362 " %-30s %10u %10u %10u %10u\n",
363 "preamble:",
364 le32_to_cpu(tx->preamble_cnt),
365 accum_tx->preamble_cnt,
366 delta_tx->preamble_cnt, max_tx->preamble_cnt);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 " %-30s %10u %10u %10u %10u\n",
369 "rx_detected_cnt:",
370 le32_to_cpu(tx->rx_detected_cnt),
371 accum_tx->rx_detected_cnt,
372 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 " %-30s %10u %10u %10u %10u\n",
375 "bt_prio_defer_cnt:",
376 le32_to_cpu(tx->bt_prio_defer_cnt),
377 accum_tx->bt_prio_defer_cnt,
378 delta_tx->bt_prio_defer_cnt,
379 max_tx->bt_prio_defer_cnt);
380 pos += scnprintf(buf + pos, bufsz - pos,
381 " %-30s %10u %10u %10u %10u\n",
382 "bt_prio_kill_cnt:",
383 le32_to_cpu(tx->bt_prio_kill_cnt),
384 accum_tx->bt_prio_kill_cnt,
385 delta_tx->bt_prio_kill_cnt,
386 max_tx->bt_prio_kill_cnt);
387 pos += scnprintf(buf + pos, bufsz - pos,
388 " %-30s %10u %10u %10u %10u\n",
389 "few_bytes_cnt:",
390 le32_to_cpu(tx->few_bytes_cnt),
391 accum_tx->few_bytes_cnt,
392 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
393 pos += scnprintf(buf + pos, bufsz - pos,
394 " %-30s %10u %10u %10u %10u\n",
395 "cts_timeout:",
396 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
397 delta_tx->cts_timeout, max_tx->cts_timeout);
398 pos += scnprintf(buf + pos, bufsz - pos,
399 " %-30s %10u %10u %10u %10u\n",
400 "ack_timeout:",
401 le32_to_cpu(tx->ack_timeout),
402 accum_tx->ack_timeout,
403 delta_tx->ack_timeout, max_tx->ack_timeout);
404 pos += scnprintf(buf + pos, bufsz - pos,
405 " %-30s %10u %10u %10u %10u\n",
406 "expected_ack_cnt:",
407 le32_to_cpu(tx->expected_ack_cnt),
408 accum_tx->expected_ack_cnt,
409 delta_tx->expected_ack_cnt,
410 max_tx->expected_ack_cnt);
411 pos += scnprintf(buf + pos, bufsz - pos,
412 " %-30s %10u %10u %10u %10u\n",
413 "actual_ack_cnt:",
414 le32_to_cpu(tx->actual_ack_cnt),
415 accum_tx->actual_ack_cnt,
416 delta_tx->actual_ack_cnt,
417 max_tx->actual_ack_cnt);
418
419 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
420 kfree(buf);
421 return ret;
422}
423
424ssize_t iwl3945_ucode_general_stats_read(struct file *file,
425 char __user *user_buf,
426 size_t count, loff_t *ppos)
427{
428 struct iwl_priv *priv = file->private_data;
429 int pos = 0;
430 char *buf;
431 int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
432 ssize_t ret;
433 struct iwl39_statistics_general *general, *accum_general;
434 struct iwl39_statistics_general *delta_general, *max_general;
435 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
436 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
437
438 if (!iwl_legacy_is_alive(priv))
439 return -EAGAIN;
440
441 buf = kzalloc(bufsz, GFP_KERNEL);
442 if (!buf) {
443 IWL_ERR(priv, "Can not allocate Buffer\n");
444 return -ENOMEM;
445 }
446
447 /*
448	 * The statistics displayed here are based on the last
449	 * statistics notification from uCode and might not
450	 * reflect the current uCode activity.
451 */
452 general = &priv->_3945.statistics.general;
453 dbg = &priv->_3945.statistics.general.dbg;
454 div = &priv->_3945.statistics.general.div;
455 accum_general = &priv->_3945.accum_statistics.general;
456 delta_general = &priv->_3945.delta_statistics.general;
457 max_general = &priv->_3945.max_delta.general;
458 accum_dbg = &priv->_3945.accum_statistics.general.dbg;
459 delta_dbg = &priv->_3945.delta_statistics.general.dbg;
460 max_dbg = &priv->_3945.max_delta.general.dbg;
461 accum_div = &priv->_3945.accum_statistics.general.div;
462 delta_div = &priv->_3945.delta_statistics.general.div;
463 max_div = &priv->_3945.max_delta.general.div;
464 pos += iwl3945_statistics_flag(priv, buf, bufsz);
465 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
466 "acumulative delta max\n",
467 "Statistics_General:");
468 pos += scnprintf(buf + pos, bufsz - pos,
469 " %-30s %10u %10u %10u %10u\n",
470 "burst_check:",
471 le32_to_cpu(dbg->burst_check),
472 accum_dbg->burst_check,
473 delta_dbg->burst_check, max_dbg->burst_check);
474 pos += scnprintf(buf + pos, bufsz - pos,
475 " %-30s %10u %10u %10u %10u\n",
476 "burst_count:",
477 le32_to_cpu(dbg->burst_count),
478 accum_dbg->burst_count,
479 delta_dbg->burst_count, max_dbg->burst_count);
480 pos += scnprintf(buf + pos, bufsz - pos,
481 " %-30s %10u %10u %10u %10u\n",
482 "sleep_time:",
483 le32_to_cpu(general->sleep_time),
484 accum_general->sleep_time,
485 delta_general->sleep_time, max_general->sleep_time);
486 pos += scnprintf(buf + pos, bufsz - pos,
487 " %-30s %10u %10u %10u %10u\n",
488 "slots_out:",
489 le32_to_cpu(general->slots_out),
490 accum_general->slots_out,
491 delta_general->slots_out, max_general->slots_out);
492 pos += scnprintf(buf + pos, bufsz - pos,
493 " %-30s %10u %10u %10u %10u\n",
494 "slots_idle:",
495 le32_to_cpu(general->slots_idle),
496 accum_general->slots_idle,
497 delta_general->slots_idle, max_general->slots_idle);
498 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
499 le32_to_cpu(general->ttl_timestamp));
500 pos += scnprintf(buf + pos, bufsz - pos,
501 " %-30s %10u %10u %10u %10u\n",
502 "tx_on_a:",
503 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
504 delta_div->tx_on_a, max_div->tx_on_a);
505 pos += scnprintf(buf + pos, bufsz - pos,
506 " %-30s %10u %10u %10u %10u\n",
507 "tx_on_b:",
508 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
509 delta_div->tx_on_b, max_div->tx_on_b);
510 pos += scnprintf(buf + pos, bufsz - pos,
511 " %-30s %10u %10u %10u %10u\n",
512 "exec_time:",
513 le32_to_cpu(div->exec_time), accum_div->exec_time,
514 delta_div->exec_time, max_div->exec_time);
515 pos += scnprintf(buf + pos, bufsz - pos,
516 " %-30s %10u %10u %10u %10u\n",
517 "probe_time:",
518 le32_to_cpu(div->probe_time), accum_div->probe_time,
519 delta_div->probe_time, max_div->probe_time);
520 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
521 kfree(buf);
522 return ret;
523}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
new file mode 100644
index 000000000000..8fef4b32b447
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
@@ -0,0 +1,60 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl3945_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count,
40 loff_t *ppos);
41#else
42static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
43 char __user *user_buf, size_t count,
44 loff_t *ppos)
45{
46 return 0;
47}
48static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
49 char __user *user_buf, size_t count,
50 loff_t *ppos)
51{
52 return 0;
53}
54static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
55 char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 return 0;
59}
60#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
new file mode 100644
index 000000000000..836c9919f82e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
@@ -0,0 +1,187 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_3945_fh_h__
64#define __iwl_3945_fh_h__
65
66/************************************/
67/* iwl3945 Flow Handler Definitions */
68/************************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH39_MEM_LOWER_BOUND (0x0800)
75#define FH39_MEM_UPPER_BOUND (0x1000)
76
77#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
78#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
79#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
80#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
81#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
82#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
83
84/* TFDB (Transmit Frame Buffer Descriptor) */
85#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
86 ((_ch) * 2 + (buf)) * 0x28)
87#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
88
89/* CBCC channel is [0,2] */
90#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
91#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
92#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
93
94/* RCSR channel is [0,2] */
95#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
96#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
97#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
98#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
99#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
100
101#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
102
103/* RSSR */
104#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
105#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
106
107/* TCSR */
108#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
109#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
110#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
111#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
112
113/* TSSR */
114#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
115#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
116#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
117
118
119/* DBM */
120
121#define FH39_SRVC_CHNL (6)
122
123#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
124#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
125
126#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
127
128#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
129
130#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
131
132#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
133
134#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
135
136#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
137
138#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
139#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
140
141#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
142#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
143
144#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
145
146#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
147
148#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
149#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
150
151#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
152
153#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
154
155#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
156#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
157
158#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
159
160#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
161#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
162
163#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
164#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
165
166#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
167#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
168
169#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
170 (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
171 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
172
173#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
174
175struct iwl3945_tfd_tb {
176 __le32 addr;
177 __le32 len;
178} __packed;
179
180struct iwl3945_tfd {
181 __le32 control_flags;
182 struct iwl3945_tfd_tb tbs[4];
183 u8 __pad[28];
184} __packed;
185
186
187#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
new file mode 100644
index 000000000000..779d3cb86e2c
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -0,0 +1,293 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
65 * Please use iwl-commands.h for uCode API definitions.
66 * Please use iwl-3945.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_3945_hw__
70#define __iwl_3945_hw__
71
72#include "iwl-eeprom.h"
73
74/* RSSI to dBm */
75#define IWL39_RSSI_OFFSET 95
76
77#define IWL_DEFAULT_TX_POWER 0x0F
78
79/*
80 * EEPROM related constants, enums, and structures.
81 */
82#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
83
84/*
85 * Mapping of a Tx power level, at factory calibration temperature,
86 * to a radio/DSP gain table index.
87 * One for each of 5 "sample" power levels in each band.
88 * v_det is measured at the factory, using the 3945's built-in power amplifier
89 * (PA) output voltage detector. This same detector is used during Tx of
90 * long packets in normal operation to provide feedback as to proper output
91 * level.
92 * Data copied from EEPROM.
93 * DO NOT ALTER THIS STRUCTURE!!!
94 */
95struct iwl3945_eeprom_txpower_sample {
96 u8 gain_index; /* index into power (gain) setup table ... */
97 s8 power; /* ... for this pwr level for this chnl group */
98 u16 v_det; /* PA output voltage */
99} __packed;
100
101/*
102 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
103 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
104 * Tx power setup code interpolates between the 5 "sample" power levels
105 * to determine the nominal setup for a requested power level.
106 * Data copied from EEPROM.
107 * DO NOT ALTER THIS STRUCTURE!!!
108 */
109struct iwl3945_eeprom_txpower_group {
110 struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
111 s32 a, b, c, d, e; /* coefficients for voltage->power
112 * formula (signed) */
113 s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
114 * frequency (signed) */
115 s8 saturation_power; /* highest power possible by h/w in this
116 * band */
117 u8 group_channel; /* "representative" channel # in this band */
118 s16 temperature; /* h/w temperature at factory calib this band
119 * (signed) */
120} __packed;
121
122/*
123 * Temperature-based Tx-power compensation data, not band-specific.
124 * These coefficients are use to modify a/b/c/d/e coeffs based on
125 * difference between current temperature and factory calib temperature.
126 * Data copied from EEPROM.
127 */
128struct iwl3945_eeprom_temperature_corr {
129 u32 Ta;
130 u32 Tb;
131 u32 Tc;
132 u32 Td;
133 u32 Te;
134} __packed;
135
136/*
137 * EEPROM map
138 */
139struct iwl3945_eeprom {
140 u8 reserved0[16];
141 u16 device_id; /* abs.ofs: 16 */
142 u8 reserved1[2];
143 u16 pmc; /* abs.ofs: 20 */
144 u8 reserved2[20];
145 u8 mac_address[6]; /* abs.ofs: 42 */
146 u8 reserved3[58];
147 u16 board_revision; /* abs.ofs: 106 */
148 u8 reserved4[11];
149 u8 board_pba_number[9]; /* abs.ofs: 119 */
150 u8 reserved5[8];
151 u16 version; /* abs.ofs: 136 */
152 u8 sku_cap; /* abs.ofs: 138 */
153 u8 leds_mode; /* abs.ofs: 139 */
154 u16 oem_mode;
155 u16 wowlan_mode; /* abs.ofs: 142 */
156 u16 leds_time_interval; /* abs.ofs: 144 */
157 u8 leds_off_time; /* abs.ofs: 146 */
158 u8 leds_on_time; /* abs.ofs: 147 */
159 u8 almgor_m_version; /* abs.ofs: 148 */
160 u8 antenna_switch_type; /* abs.ofs: 149 */
161 u8 reserved6[42];
162 u8 sku_id[4]; /* abs.ofs: 192 */
163
164/*
165 * Per-channel regulatory data.
166 *
167 * Each channel that *might* be supported by 3945 has a fixed location
168 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
169 * txpower (MSB).
170 *
171 * Entries immediately below are for 20 MHz channel width.
172 *
173 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
174 */
175 u16 band_1_count; /* abs.ofs: 196 */
176 struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
177
178/*
179 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
180 * 5.0 GHz channels 7, 8, 11, 12, 16
181 * (4915-5080MHz) (none of these is ever supported)
182 */
183 u16 band_2_count; /* abs.ofs: 226 */
184 struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
185
186/*
187 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
188 * (5170-5320MHz)
189 */
190 u16 band_3_count; /* abs.ofs: 254 */
191 struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
192
193/*
194 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
195 * (5500-5700MHz)
196 */
197 u16 band_4_count; /* abs.ofs: 280 */
198 struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
199
200/*
201 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
202 * (5725-5825MHz)
203 */
204 u16 band_5_count; /* abs.ofs: 304 */
205 struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
206
207 u8 reserved9[194];
208
209/*
210 * 3945 Txpower calibration data.
211 */
212#define IWL_NUM_TX_CALIB_GROUPS 5
213 struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
214/* abs.ofs: 512 */
215 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
216 u8 reserved16[172]; /* fill out to full 1024 byte block */
217} __packed;
218
219#define IWL3945_EEPROM_IMG_SIZE 1024
220
221/* End of EEPROM */
222
223#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
224#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
225
226/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
227#define IWL39_NUM_QUEUES 5
228#define IWL39_CMD_QUEUE_NUM 4
229
230#define IWL_DEFAULT_TX_RETRY 15
231
232/*********************************************/
233
234#define RFD_SIZE 4
235#define NUM_TFD_CHUNKS 4
236
237#define RX_QUEUE_SIZE 256
238#define RX_QUEUE_MASK 255
239#define RX_QUEUE_SIZE_LOG 8
240
241#define U32_PAD(n) ((4-(n))&0x3)
242
243#define TFD_CTL_COUNT_SET(n) (n << 24)
244#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
245#define TFD_CTL_PAD_SET(n) (n << 28)
246#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
247
248/* Sizes and addresses for instruction and data memory (SRAM) in
249 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
250#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
251#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
252
253#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
254#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
255
256#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
257 IWL39_RTC_INST_LOWER_BOUND)
258#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
259 IWL39_RTC_DATA_LOWER_BOUND)
260
261#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
262#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
263
264/* Size of uCode instruction memory in bootstrap state machine */
265#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
266
267static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
268{
269 return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
270 (addr < IWL39_RTC_DATA_UPPER_BOUND);
271}
272
273/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
274 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
275struct iwl3945_shared {
276 __le32 tx_base_ptr[8];
277} __packed;
278
279static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
280{
281 return le16_to_cpu(rate_n_flags) & 0xFF;
282}
283
284static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
285{
286 return le16_to_cpu(rate_n_flags);
287}
288
289static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
290{
291 return cpu_to_le16((u16)rate|flags);
292}
293#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
new file mode 100644
index 000000000000..abd923558d48
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
@@ -0,0 +1,64 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-3945.h"
42#include "iwl-core.h"
43#include "iwl-dev.h"
44#include "iwl-3945-led.h"
45
46
47/* Send led command */
48static int iwl3945_send_led_cmd(struct iwl_priv *priv,
49 struct iwl_led_cmd *led_cmd)
50{
51 struct iwl_host_cmd cmd = {
52 .id = REPLY_LEDS_CMD,
53 .len = sizeof(struct iwl_led_cmd),
54 .data = led_cmd,
55 .flags = CMD_ASYNC,
56 .callback = NULL,
57 };
58
59 return iwl_legacy_send_cmd(priv, &cmd);
60}
61
62const struct iwl_led_ops iwl3945_led_ops = {
63 .cmd = iwl3945_send_led_cmd,
64};
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
new file mode 100644
index 000000000000..96716276eb0d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
@@ -0,0 +1,32 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_3945_led_h__
28#define __iwl_3945_led_h__
29
30extern const struct iwl_led_ops iwl3945_led_ops;
31
32#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
new file mode 100644
index 000000000000..977bd2477c6a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -0,0 +1,994 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/skbuff.h>
30#include <linux/slab.h>
31#include <linux/wireless.h>
32#include <net/mac80211.h>
33
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/delay.h>
37
38#include <linux/workqueue.h>
39
40#include "iwl-commands.h"
41#include "iwl-3945.h"
42#include "iwl-sta.h"
43
44#define RS_NAME "iwl-3945-rs"
45
46static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
47 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
48};
49
50static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
51 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
52};
53
54static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
55 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
56};
57
58static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
59 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
60};
61
62struct iwl3945_tpt_entry {
63 s8 min_rssi;
64 u8 index;
65};
66
67static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
68 {-60, IWL_RATE_54M_INDEX},
69 {-64, IWL_RATE_48M_INDEX},
70 {-72, IWL_RATE_36M_INDEX},
71 {-80, IWL_RATE_24M_INDEX},
72 {-84, IWL_RATE_18M_INDEX},
73 {-85, IWL_RATE_12M_INDEX},
74 {-87, IWL_RATE_9M_INDEX},
75 {-89, IWL_RATE_6M_INDEX}
76};
77
78static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
79 {-60, IWL_RATE_54M_INDEX},
80 {-64, IWL_RATE_48M_INDEX},
81 {-68, IWL_RATE_36M_INDEX},
82 {-80, IWL_RATE_24M_INDEX},
83 {-84, IWL_RATE_18M_INDEX},
84 {-85, IWL_RATE_12M_INDEX},
85 {-86, IWL_RATE_11M_INDEX},
86 {-88, IWL_RATE_5M_INDEX},
87 {-90, IWL_RATE_2M_INDEX},
88 {-92, IWL_RATE_1M_INDEX}
89};
90
91#define IWL_RATE_MAX_WINDOW 62
92#define IWL_RATE_FLUSH (3*HZ)
93#define IWL_RATE_WIN_FLUSH (HZ/2)
94#define IWL39_RATE_HIGH_TH 11520
95#define IWL_SUCCESS_UP_TH 8960
96#define IWL_SUCCESS_DOWN_TH 10880
97#define IWL_RATE_MIN_FAILURE_TH 6
98#define IWL_RATE_MIN_SUCCESS_TH 8
99#define IWL_RATE_DECREASE_TH 1920
100#define IWL_RATE_RETRY_TH 15
101
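/*
 * Editor's note -- illustrative sketch only, not part of the patch: the
 * thresholds above appear to sit on the same x128 fixed-point scale used
 * for window->success_ratio below (128 * (100 * success) / attempts, so
 * 12800 == 100%).  Under that assumption:
 *   IWL_RATE_DECREASE_TH (1920)  ~ 15%
 *   IWL_SUCCESS_UP_TH    (8960)  ~ 70%
 *   IWL_SUCCESS_DOWN_TH  (10880) ~ 85%
 *   IWL39_RATE_HIGH_TH   (11520) ~ 90%
 */
static inline int iwl3945_rs_ratio_to_percent(s32 success_ratio)
{
	/* hypothetical helper, shown only to make the scale explicit */
	return success_ratio / 128;
}
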
102static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
103{
104 u32 index = 0;
105 u32 table_size = 0;
106 struct iwl3945_tpt_entry *tpt_table = NULL;
107
108 if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
109 rssi = IWL_MIN_RSSI_VAL;
110
111 switch (band) {
112 case IEEE80211_BAND_2GHZ:
113 tpt_table = iwl3945_tpt_table_g;
114 table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
115 break;
116
117 case IEEE80211_BAND_5GHZ:
118 tpt_table = iwl3945_tpt_table_a;
119 table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
120 break;
121
122 default:
123 BUG();
124 break;
125 }
126
127 while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
128 index++;
129
130 index = min(index, (table_size - 1));
131
132 return tpt_table[index].index;
133}
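
/*
 * Editor's example (hedged, not part of the patch): for rssi = -70 on the
 * 2.4 GHz band the loop above skips the -60/-64/-68 entries and stops at
 * {-80, IWL_RATE_24M_INDEX}, i.e. the station would start at 24 Mbps.
 * A minimal illustrative wrapper, assuming a 2.4 GHz association:
 */
static inline u8 iwl3945_rs_sketch_start_rate_2ghz(s32 rssi)
{
	return iwl3945_get_rate_index_by_rssi(rssi, IEEE80211_BAND_2GHZ);
}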
134
135static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
136{
137 window->data = 0;
138 window->success_counter = 0;
139 window->success_ratio = -1;
140 window->counter = 0;
141 window->average_tpt = IWL_INVALID_VALUE;
142 window->stamp = 0;
143}
144
145/**
146 * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
147 *
148 * Returns the number of windows that have gathered data but were
149 * not flushed. If there were any that were not flushed, then
150 * reschedule the rate flushing routine.
151 */
152static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
153{
154 int unflushed = 0;
155 int i;
156 unsigned long flags;
157 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
158
159 /*
160 * For each rate, if we have collected data on that rate
161 * and it has been more than IWL_RATE_WIN_FLUSH
162 * since we flushed, clear out the gathered statistics
163 */
164 for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
165 if (!rs_sta->win[i].counter)
166 continue;
167
168 spin_lock_irqsave(&rs_sta->lock, flags);
169 if (time_after(jiffies, rs_sta->win[i].stamp +
170 IWL_RATE_WIN_FLUSH)) {
171 IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
172 "index %d\n",
173 rs_sta->win[i].counter, i);
174 iwl3945_clear_window(&rs_sta->win[i]);
175 } else
176 unflushed++;
177 spin_unlock_irqrestore(&rs_sta->lock, flags);
178 }
179
180 return unflushed;
181}
182
183#define IWL_RATE_FLUSH_MAX 5000 /* msec */
184#define IWL_RATE_FLUSH_MIN 50 /* msec */
185#define IWL_AVERAGE_PACKETS 1500
186
187static void iwl3945_bg_rate_scale_flush(unsigned long data)
188{
189 struct iwl3945_rs_sta *rs_sta = (void *)data;
190 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
191 int unflushed = 0;
192 unsigned long flags;
193 u32 packet_count, duration, pps;
194
195 IWL_DEBUG_RATE(priv, "enter\n");
196
197 unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
198
199 spin_lock_irqsave(&rs_sta->lock, flags);
200
201 /* Number of packets Rx'd since last time this timer ran */
202 packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
203
204 rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
205
206 if (unflushed) {
207 duration =
208 jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
209
210 IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
211 packet_count, duration);
212
213 /* Determine packets per second */
214 if (duration)
215 pps = (packet_count * 1000) / duration;
216 else
217 pps = 0;
218
219 if (pps) {
220 duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
221 if (duration < IWL_RATE_FLUSH_MIN)
222 duration = IWL_RATE_FLUSH_MIN;
223 else if (duration > IWL_RATE_FLUSH_MAX)
224 duration = IWL_RATE_FLUSH_MAX;
225 } else
226 duration = IWL_RATE_FLUSH_MAX;
227
228 rs_sta->flush_time = msecs_to_jiffies(duration);
229
230 IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
231 duration, packet_count);
232
233 mod_timer(&rs_sta->rate_scale_flush, jiffies +
234 rs_sta->flush_time);
235
236 rs_sta->last_partial_flush = jiffies;
237 } else {
238 rs_sta->flush_time = IWL_RATE_FLUSH;
239 rs_sta->flush_pending = 0;
240 }
241 /* If there weren't any unflushed entries, we don't schedule the timer
242 * to run again */
243
244 rs_sta->last_flush = jiffies;
245
246 spin_unlock_irqrestore(&rs_sta->lock, flags);
247
248 IWL_DEBUG_RATE(priv, "leave\n");
249}
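
/*
 * Editor's sketch (hypothetical, not part of the patch): the period chosen
 * above is "time needed to observe IWL_AVERAGE_PACKETS at the measured
 * packet rate", clamped to [IWL_RATE_FLUSH_MIN, IWL_RATE_FLUSH_MAX] msec.
 * Example: 300 packets in 200 ms -> pps = 1500 -> period = 1000 ms.
 */
static inline u32 iwl3945_rs_sketch_flush_period(u32 packets, u32 duration_ms)
{
	u32 pps = duration_ms ? (packets * 1000) / duration_ms : 0;

	if (!pps)
		return IWL_RATE_FLUSH_MAX;
	return clamp_t(u32, (IWL_AVERAGE_PACKETS * 1000) / pps,
		       IWL_RATE_FLUSH_MIN, IWL_RATE_FLUSH_MAX);
}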
250
251/**
252 * iwl3945_collect_tx_data - Update the success/failure sliding window
253 *
254 * We keep a sliding window of the last 62 packets transmitted
255 * at this rate. window->data contains the bitmask of successful
256 * packets.
257 */
258static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
259 struct iwl3945_rate_scale_data *window,
260 int success, int retries, int index)
261{
262 unsigned long flags;
263 s32 fail_count;
264 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
265
266 if (!retries) {
267 IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
268 return;
269 }
270
271 spin_lock_irqsave(&rs_sta->lock, flags);
272
273 /*
274 * Keep track of only the latest 62 tx frame attempts in this rate's
275 * history window; anything older isn't really relevant any more.
276 * If we have filled up the sliding window, drop the oldest attempt;
277 * if the oldest attempt (highest bit in bitmap) shows "success",
278 * subtract "1" from the success counter (this is the main reason
279 * we keep these bitmaps!).
280 */
281 while (retries > 0) {
282 if (window->counter >= IWL_RATE_MAX_WINDOW) {
283
284 /* remove earliest */
285 window->counter = IWL_RATE_MAX_WINDOW - 1;
286
287 if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
288 window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
289 window->success_counter--;
290 }
291 }
292
293 /* Increment frames-attempted counter */
294 window->counter++;
295
296 /* Shift bitmap by one frame (throw away oldest history),
297 * OR in "1", and increment "success" if this
298 * frame was successful. */
299 window->data <<= 1;
300 if (success > 0) {
301 window->success_counter++;
302 window->data |= 0x1;
303 success--;
304 }
305
306 retries--;
307 }
308
309 /* Calculate current success ratio, avoid divide-by-0! */
310 if (window->counter > 0)
311 window->success_ratio = 128 * (100 * window->success_counter)
312 / window->counter;
313 else
314 window->success_ratio = IWL_INVALID_VALUE;
315
316 fail_count = window->counter - window->success_counter;
317
318 /* Calculate average throughput, if we have enough history. */
319 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
320 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
321 window->average_tpt = ((window->success_ratio *
322 rs_sta->expected_tpt[index] + 64) / 128);
323 else
324 window->average_tpt = IWL_INVALID_VALUE;
325
326 /* Tag this window as having been updated */
327 window->stamp = jiffies;
328
329 spin_unlock_irqrestore(&rs_sta->lock, flags);
330
331}
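
/*
 * Editor's worked example (hedged, not part of the patch): with
 * counter = 62 and success_counter = 40 the code above yields
 * success_ratio = 128 * (100 * 40) / 62 = 8258 (about 64.5% on the x128
 * scale), and with expected_tpt[index] = 104 the smoothed throughput is
 * (8258 * 104 + 64) / 128 = 6710, i.e. roughly success-percentage times
 * the expected-throughput table value.
 */
static inline s32 iwl3945_rs_sketch_avg_tpt(s32 success_ratio, s32 expected_tpt)
{
	/* hypothetical restatement of the smoothing step, for illustration */
	return (success_ratio * expected_tpt + 64) / 128;
}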
332
333/*
334 * Called after adding a new station to initialize rate scaling
335 */
336void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
337{
338 struct ieee80211_hw *hw = priv->hw;
339 struct ieee80211_conf *conf = &priv->hw->conf;
340 struct iwl3945_sta_priv *psta;
341 struct iwl3945_rs_sta *rs_sta;
342 struct ieee80211_supported_band *sband;
343 int i;
344
345 IWL_DEBUG_INFO(priv, "enter\n");
346 if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
347 goto out;
348
349 psta = (struct iwl3945_sta_priv *) sta->drv_priv;
350 rs_sta = &psta->rs_sta;
351 sband = hw->wiphy->bands[conf->channel->band];
352
353 rs_sta->priv = priv;
354
355 rs_sta->start_rate = IWL_RATE_INVALID;
356
357 /* default to just 802.11b */
358 rs_sta->expected_tpt = iwl3945_expected_tpt_b;
359
360 rs_sta->last_partial_flush = jiffies;
361 rs_sta->last_flush = jiffies;
362 rs_sta->flush_time = IWL_RATE_FLUSH;
363 rs_sta->last_tx_packets = 0;
364
365 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
366 rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
367
368 for (i = 0; i < IWL_RATE_COUNT_3945; i++)
369 iwl3945_clear_window(&rs_sta->win[i]);
370
371 /* TODO: what is a good starting rate for STA? About middle? Maybe not
372 * the lowest or the highest rate.. Could consider using RSSI from
373 * previous packets? Need to have IEEE 802.1X auth succeed immediately
374 * after assoc.. */
375
376 for (i = sband->n_bitrates - 1; i >= 0; i--) {
377 if (sta->supp_rates[sband->band] & (1 << i)) {
378 rs_sta->last_txrate_idx = i;
379 break;
380 }
381 }
382
383 priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
384 /* For the 5 GHz band, rates start at IWL_FIRST_OFDM_RATE */
385 if (sband->band == IEEE80211_BAND_5GHZ) {
386 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
387 priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
388 IWL_FIRST_OFDM_RATE;
389 }
390
391out:
392 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
393
394 IWL_DEBUG_INFO(priv, "leave\n");
395}
396
397static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
398{
399 return hw->priv;
400}
401
402/* rate scale requires free function to be implemented */
403static void iwl3945_rs_free(void *priv)
404{
405 return;
406}
407
408static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
409{
410 struct iwl3945_rs_sta *rs_sta;
411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
412 struct iwl_priv *priv __maybe_unused = iwl_priv;
413
414 IWL_DEBUG_RATE(priv, "enter\n");
415
416 rs_sta = &psta->rs_sta;
417
418 spin_lock_init(&rs_sta->lock);
419 init_timer(&rs_sta->rate_scale_flush);
420
421 IWL_DEBUG_RATE(priv, "leave\n");
422
423 return rs_sta;
424}
425
426static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
427 void *priv_sta)
428{
429 struct iwl3945_rs_sta *rs_sta = priv_sta;
430
431 /*
432 * Be careful not to use any members of iwl3945_rs_sta (like trying
433 * to use iwl_priv to print out debugging) since it may not be fully
434 * initialized at this point.
435 */
436 del_timer_sync(&rs_sta->rate_scale_flush);
437}
438
439
440/**
441 * iwl3945_rs_tx_status - Update rate control values based on Tx results
442 *
443 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
444 * the hardware for each rate.
445 */
446static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
447 struct ieee80211_sta *sta, void *priv_sta,
448 struct sk_buff *skb)
449{
450 s8 retries = 0, current_count;
451 int scale_rate_index, first_index, last_index;
452 unsigned long flags;
453 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
454 struct iwl3945_rs_sta *rs_sta = priv_sta;
455 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
456
457 IWL_DEBUG_RATE(priv, "enter\n");
458
459 retries = info->status.rates[0].count;
460 /* Sanity Check for retries */
461 if (retries > IWL_RATE_RETRY_TH)
462 retries = IWL_RATE_RETRY_TH;
463
464 first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
465 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
466 IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
467 return;
468 }
469
470 if (!priv_sta) {
471 IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
472 return;
473 }
474
475 /* Treat uninitialized rate scaling data same as non-existing. */
476 if (!rs_sta->priv) {
477 IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
478 return;
479 }
480
481
482 rs_sta->tx_packets++;
483
484 scale_rate_index = first_index;
485 last_index = first_index;
486
487 /*
488 * Update the window for each rate. We determine which rates
489 * were Tx'd based on the total number of retries vs. the number
491 * of retries configured for each rate -- currently the driver-wide
492 * 'retry_rate' value rather than a per-rate limit.
492 *
493 * On exit from this while loop last_index indicates the rate
494 * at which the frame was finally transmitted (or failed if no
495 * ACK)
496 */
497 while (retries > 1) {
498 if ((retries - 1) < priv->retry_rate) {
499 current_count = (retries - 1);
500 last_index = scale_rate_index;
501 } else {
502 current_count = priv->retry_rate;
503 last_index = iwl3945_rs_next_rate(priv,
504 scale_rate_index);
505 }
506
507 /* Update this rate accounting for as many retries
508 * as was used for it (per current_count) */
509 iwl3945_collect_tx_data(rs_sta,
510 &rs_sta->win[scale_rate_index],
511 0, current_count, scale_rate_index);
512 IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
513 scale_rate_index, current_count);
514
515 retries -= current_count;
516
517 scale_rate_index = last_index;
518 }
519
520
521 /* Update the last index window with success/failure based on ACK */
522 IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
523 last_index,
524 (info->flags & IEEE80211_TX_STAT_ACK) ?
525 "success" : "failure");
526 iwl3945_collect_tx_data(rs_sta,
527 &rs_sta->win[last_index],
528 info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
529
530 /* We updated the rate scale window -- if it's been more than
531 * flush_time since the last run, schedule the flush
532 * again */
533 spin_lock_irqsave(&rs_sta->lock, flags);
534
535 if (!rs_sta->flush_pending &&
536 time_after(jiffies, rs_sta->last_flush +
537 rs_sta->flush_time)) {
538
539 rs_sta->last_partial_flush = jiffies;
540 rs_sta->flush_pending = 1;
541 mod_timer(&rs_sta->rate_scale_flush,
542 jiffies + rs_sta->flush_time);
543 }
544
545 spin_unlock_irqrestore(&rs_sta->lock, flags);
546
547 IWL_DEBUG_RATE(priv, "leave\n");
548}
549
550static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
551 u8 index, u16 rate_mask, enum ieee80211_band band)
552{
553 u8 high = IWL_RATE_INVALID;
554 u8 low = IWL_RATE_INVALID;
555 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
556
557 /* 802.11A walks to the next literal adjacent rate in
558 * the rate table */
559 if (unlikely(band == IEEE80211_BAND_5GHZ)) {
560 int i;
561 u32 mask;
562
563 /* Find the previous rate that is in the rate mask */
564 i = index - 1;
565 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
566 if (rate_mask & mask) {
567 low = i;
568 break;
569 }
570 }
571
572 /* Find the next rate that is in the rate mask */
573 i = index + 1;
574 for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
575 i++, mask <<= 1) {
576 if (rate_mask & mask) {
577 high = i;
578 break;
579 }
580 }
581
582 return (high << 8) | low;
583 }
584
585 low = index;
586 while (low != IWL_RATE_INVALID) {
587 if (rs_sta->tgg)
588 low = iwl3945_rates[low].prev_rs_tgg;
589 else
590 low = iwl3945_rates[low].prev_rs;
591 if (low == IWL_RATE_INVALID)
592 break;
593 if (rate_mask & (1 << low))
594 break;
595 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
596 }
597
598 high = index;
599 while (high != IWL_RATE_INVALID) {
600 if (rs_sta->tgg)
601 high = iwl3945_rates[high].next_rs_tgg;
602 else
603 high = iwl3945_rates[high].next_rs;
604 if (high == IWL_RATE_INVALID)
605 break;
606 if (rate_mask & (1 << high))
607 break;
608 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
609 }
610
611 return (high << 8) | low;
612}
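
/*
 * Editor's note (hedged, not part of the patch): the return value packs both
 * neighbours into one u16 -- the lower rate in bits 0-7, the higher rate in
 * bits 8-15 -- with IWL_RATE_INVALID marking a missing neighbour.  A
 * hypothetical decode helper mirroring what the caller does:
 */
static inline void iwl3945_rs_sketch_unpack_adjacent(u16 high_low,
						     u8 *low, u8 *high)
{
	*low = high_low & 0xff;
	*high = (high_low >> 8) & 0xff;
}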
613
614/**
615 * iwl3945_rs_get_rate - find the rate for the requested packet
616 *
617 * Fills info->control.rates[0] with the rate selected for this packet.
618 *
619 * The rate control algorithm has no internal mapping between hw_mode's
620 * rate ordering and the rate ordering used by the rate control algorithm.
621 *
622 * The rate control algorithm uses a single table of rates that goes across
623 * the entire A/B/G spectrum vs. being limited to just one particular
624 * hw_mode.
625 *
626 * As such, we can't convert the index obtained below into the hw_mode's
627 * rate table and must reference the driver allocated rate table
628 *
629 */
630static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
631 void *priv_sta, struct ieee80211_tx_rate_control *txrc)
632{
633 struct ieee80211_supported_band *sband = txrc->sband;
634 struct sk_buff *skb = txrc->skb;
635 u8 low = IWL_RATE_INVALID;
636 u8 high = IWL_RATE_INVALID;
637 u16 high_low;
638 int index;
639 struct iwl3945_rs_sta *rs_sta = priv_sta;
640 struct iwl3945_rate_scale_data *window = NULL;
641 int current_tpt = IWL_INVALID_VALUE;
642 int low_tpt = IWL_INVALID_VALUE;
643 int high_tpt = IWL_INVALID_VALUE;
644 u32 fail_count;
645 s8 scale_action = 0;
646 unsigned long flags;
647 u16 rate_mask;
648 s8 max_rate_idx = -1;
649 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
650 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
651
652 IWL_DEBUG_RATE(priv, "enter\n");
653
654 /* Treat uninitialized rate scaling data same as non-existing. */
655 if (rs_sta && !rs_sta->priv) {
656 IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
657 priv_sta = NULL;
658 }
659
660 if (rate_control_send_low(sta, priv_sta, txrc))
661 return;
662
663 rate_mask = sta->supp_rates[sband->band];
664
665 /* get user max rate if set */
666 max_rate_idx = txrc->max_rate_idx;
667 if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
668 max_rate_idx += IWL_FIRST_OFDM_RATE;
669 if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
670 max_rate_idx = -1;
671
672 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
673
674 if (sband->band == IEEE80211_BAND_5GHZ)
675 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
676
677 spin_lock_irqsave(&rs_sta->lock, flags);
678
679 /* for a recent association, choose the best rate based on
680 * the RSSI value
681 */
682 if (rs_sta->start_rate != IWL_RATE_INVALID) {
683 if (rs_sta->start_rate < index &&
684 (rate_mask & (1 << rs_sta->start_rate)))
685 index = rs_sta->start_rate;
686 rs_sta->start_rate = IWL_RATE_INVALID;
687 }
688
689 /* force user max rate if set by user */
690 if ((max_rate_idx != -1) && (max_rate_idx < index)) {
691 if (rate_mask & (1 << max_rate_idx))
692 index = max_rate_idx;
693 }
694
695 window = &(rs_sta->win[index]);
696
697 fail_count = window->counter - window->success_counter;
698
699 if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
700 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
701 spin_unlock_irqrestore(&rs_sta->lock, flags);
702
703 IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
704 "counter: %d, success_counter: %d, "
705 "expected_tpt is %sNULL\n",
706 index,
707 window->counter,
708 window->success_counter,
709 rs_sta->expected_tpt ? "not " : "");
710
711 /* Can't calculate this yet; not enough history */
712 window->average_tpt = IWL_INVALID_VALUE;
713 goto out;
714
715 }
716
717 current_tpt = window->average_tpt;
718
719 high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
720 sband->band);
721 low = high_low & 0xff;
722 high = (high_low >> 8) & 0xff;
723
724 /* If the user set a max rate, don't allow anything higher than that constraint */
725 if ((max_rate_idx != -1) && (max_rate_idx < high))
726 high = IWL_RATE_INVALID;
727
728 /* Collect Measured throughputs of adjacent rates */
729 if (low != IWL_RATE_INVALID)
730 low_tpt = rs_sta->win[low].average_tpt;
731
732 if (high != IWL_RATE_INVALID)
733 high_tpt = rs_sta->win[high].average_tpt;
734
735 spin_unlock_irqrestore(&rs_sta->lock, flags);
736
737 scale_action = 0;
738
739 /* Low success ratio, need to drop the rate */
740 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
741 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
742 scale_action = -1;
743 /* No throughput measured yet for adjacent rates,
744 * try to increase */
745 } else if ((low_tpt == IWL_INVALID_VALUE) &&
746 (high_tpt == IWL_INVALID_VALUE)) {
747
748 if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
749 scale_action = 1;
750 else if (low != IWL_RATE_INVALID)
751 scale_action = 0;
752
753 /* Both adjacent throughputs are measured, but neither one has
754 * better throughput; we're using the best rate, don't change
755 * it! */
756 } else if ((low_tpt != IWL_INVALID_VALUE) &&
757 (high_tpt != IWL_INVALID_VALUE) &&
758 (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
759
760 IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
761 "current_tpt [%d]\n",
762 low_tpt, high_tpt, current_tpt);
763 scale_action = 0;
764
765 /* At least one of the rates has better throughput */
766 } else {
767 if (high_tpt != IWL_INVALID_VALUE) {
768
769 /* Higher rate has better throughput, increase
770 * the rate */
771 if (high_tpt > current_tpt &&
772 window->success_ratio >= IWL_RATE_INCREASE_TH)
773 scale_action = 1;
774 else {
775 IWL_DEBUG_RATE(priv,
776 "decrease rate because of high tpt\n");
777 scale_action = 0;
778 }
779 } else if (low_tpt != IWL_INVALID_VALUE) {
780 if (low_tpt > current_tpt) {
781 IWL_DEBUG_RATE(priv,
782 "decrease rate because of low tpt\n");
783 scale_action = -1;
784 } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
785 /* Lower rate is not better and success
786 * ratio is good, try increasing the rate */
787 scale_action = 1;
788 }
789 }
790 }
791
792 /* Sanity check; asked for decrease, but success rate or throughput
793 * has been good at old rate. Don't change it. */
794 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
795 ((window->success_ratio > IWL_RATE_HIGH_TH) ||
796 (current_tpt > (100 * rs_sta->expected_tpt[low]))))
797 scale_action = 0;
798
799 switch (scale_action) {
800 case -1:
801
802 /* Decrease rate */
803 if (low != IWL_RATE_INVALID)
804 index = low;
805 break;
806
807 case 1:
808 /* Increase rate */
809 if (high != IWL_RATE_INVALID)
810 index = high;
811
812 break;
813
814 case 0:
815 default:
816 /* No change */
817 break;
818 }
819
820 IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
821 index, scale_action, low, high);
822
823 out:
824
825 rs_sta->last_txrate_idx = index;
826 if (sband->band == IEEE80211_BAND_5GHZ)
827 info->control.rates[0].idx = rs_sta->last_txrate_idx -
828 IWL_FIRST_OFDM_RATE;
829 else
830 info->control.rates[0].idx = rs_sta->last_txrate_idx;
831
832 IWL_DEBUG_RATE(priv, "leave: %d\n", index);
833}
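
/*
 * Editor's sketch (hypothetical helper, not part of the patch): a compact
 * restatement of the scale_action decision above, assuming
 * IWL_INVALID_VALUE marks "no measurement yet" and success_ratio is on the
 * x128 scale.  The later sanity check (don't decrease while the old rate is
 * still performing well) is intentionally left out.
 */
static inline int iwl3945_rs_sketch_scale_action(s32 success_ratio,
						 s32 current_tpt,
						 s32 low_tpt, s32 high_tpt,
						 bool high_allowed)
{
	if (success_ratio < IWL_RATE_DECREASE_TH || !current_tpt)
		return -1;		/* drop the rate */
	if (low_tpt == IWL_INVALID_VALUE && high_tpt == IWL_INVALID_VALUE)
		return (high_allowed &&
			success_ratio >= IWL_RATE_INCREASE_TH) ? 1 : 0;
	if (low_tpt != IWL_INVALID_VALUE && high_tpt != IWL_INVALID_VALUE &&
	    low_tpt < current_tpt && high_tpt < current_tpt)
		return 0;		/* already at the best measured rate */
	if (high_tpt != IWL_INVALID_VALUE)
		return (high_tpt > current_tpt &&
			success_ratio >= IWL_RATE_INCREASE_TH) ? 1 : 0;
	if (low_tpt > current_tpt)
		return -1;		/* the lower rate is doing better */
	return (success_ratio >= IWL_RATE_INCREASE_TH) ? 1 : 0;
}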
834
835#ifdef CONFIG_MAC80211_DEBUGFS
836static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
837{
838 file->private_data = inode->i_private;
839 return 0;
840}
841
842static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
843 char __user *user_buf,
844 size_t count, loff_t *ppos)
845{
846 char *buff;
847 int desc = 0;
848 int j;
849 ssize_t ret;
850 struct iwl3945_rs_sta *lq_sta = file->private_data;
851
852 buff = kmalloc(1024, GFP_KERNEL);
853 if (!buff)
854 return -ENOMEM;
855
856 desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
857 "rate=0x%X flush time %d\n",
858 lq_sta->tx_packets,
859 lq_sta->last_txrate_idx,
860 lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
861 for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
862 desc += sprintf(buff+desc,
863 "counter=%d success=%d %%=%d\n",
864 lq_sta->win[j].counter,
865 lq_sta->win[j].success_counter,
866 lq_sta->win[j].success_ratio);
867 }
868 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
869 kfree(buff);
870 return ret;
871}
872
873static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
874 .read = iwl3945_sta_dbgfs_stats_table_read,
875 .open = iwl3945_open_file_generic,
876 .llseek = default_llseek,
877};
878
879static void iwl3945_add_debugfs(void *priv, void *priv_sta,
880 struct dentry *dir)
881{
882 struct iwl3945_rs_sta *lq_sta = priv_sta;
883
884 lq_sta->rs_sta_dbgfs_stats_table_file =
885 debugfs_create_file("rate_stats_table", 0600, dir,
886 lq_sta, &rs_sta_dbgfs_stats_table_ops);
887
888}
889
890static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
891{
892 struct iwl3945_rs_sta *lq_sta = priv_sta;
893 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
894}
895#endif
896
897/*
898 * Initialization of rate scaling information is done by the driver after
899 * the station is added. Since mac80211 calls this function before a
900 * station is added, we ignore it.
901 */
902static void iwl3945_rs_rate_init_stub(void *priv_r,
903 struct ieee80211_supported_band *sband,
904 struct ieee80211_sta *sta, void *priv_sta)
905{
906}
907
908static struct rate_control_ops rs_ops = {
909 .module = NULL,
910 .name = RS_NAME,
911 .tx_status = iwl3945_rs_tx_status,
912 .get_rate = iwl3945_rs_get_rate,
913 .rate_init = iwl3945_rs_rate_init_stub,
914 .alloc = iwl3945_rs_alloc,
915 .free = iwl3945_rs_free,
916 .alloc_sta = iwl3945_rs_alloc_sta,
917 .free_sta = iwl3945_rs_free_sta,
918#ifdef CONFIG_MAC80211_DEBUGFS
919 .add_sta_debugfs = iwl3945_add_debugfs,
920 .remove_sta_debugfs = iwl3945_remove_debugfs,
921#endif
922
923};
924void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
925{
926 struct iwl_priv *priv = hw->priv;
927 s32 rssi = 0;
928 unsigned long flags;
929 struct iwl3945_rs_sta *rs_sta;
930 struct ieee80211_sta *sta;
931 struct iwl3945_sta_priv *psta;
932
933 IWL_DEBUG_RATE(priv, "enter\n");
934
935 rcu_read_lock();
936
937 sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
938 priv->stations[sta_id].sta.sta.addr);
939 if (!sta) {
940 IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
941 rcu_read_unlock();
942 return;
943 }
944
945 psta = (void *) sta->drv_priv;
946 rs_sta = &psta->rs_sta;
947
948 spin_lock_irqsave(&rs_sta->lock, flags);
949
950 rs_sta->tgg = 0;
951 switch (priv->band) {
952 case IEEE80211_BAND_2GHZ:
953 /* TODO: this always does G, not a regression */
954 if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
955 RXON_FLG_TGG_PROTECT_MSK) {
956 rs_sta->tgg = 1;
957 rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
958 } else
959 rs_sta->expected_tpt = iwl3945_expected_tpt_g;
960 break;
961
962 case IEEE80211_BAND_5GHZ:
963 rs_sta->expected_tpt = iwl3945_expected_tpt_a;
964 break;
965 case IEEE80211_NUM_BANDS:
966 BUG();
967 break;
968 }
969
970 spin_unlock_irqrestore(&rs_sta->lock, flags);
971
972 rssi = priv->_3945.last_rx_rssi;
973 if (rssi == 0)
974 rssi = IWL_MIN_RSSI_VAL;
975
976 IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
977
978 rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
979
980 IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
981 "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
982 iwl3945_rates[rs_sta->start_rate].plcp);
983 rcu_read_unlock();
984}
985
986int iwl3945_rate_control_register(void)
987{
988 return ieee80211_rate_control_register(&rs_ops);
989}
990
991void iwl3945_rate_control_unregister(void)
992{
993 ieee80211_rate_control_unregister(&rs_ops);
994}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
new file mode 100644
index 000000000000..d096dc28204d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -0,0 +1,2742 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/sched.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/wireless.h>
38#include <linux/firmware.h>
39#include <linux/etherdevice.h>
40#include <asm/unaligned.h>
41#include <net/mac80211.h>
42
43#include "iwl-fh.h"
44#include "iwl-3945-fh.h"
45#include "iwl-commands.h"
46#include "iwl-sta.h"
47#include "iwl-3945.h"
48#include "iwl-eeprom.h"
49#include "iwl-core.h"
50#include "iwl-helpers.h"
51#include "iwl-led.h"
52#include "iwl-3945-led.h"
53#include "iwl-3945-debugfs.h"
54
55#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
56 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
57 IWL_RATE_##r##M_IEEE, \
58 IWL_RATE_##ip##M_INDEX, \
59 IWL_RATE_##in##M_INDEX, \
60 IWL_RATE_##rp##M_INDEX, \
61 IWL_RATE_##rn##M_INDEX, \
62 IWL_RATE_##pp##M_INDEX, \
63 IWL_RATE_##np##M_INDEX, \
64 IWL_RATE_##r##M_INDEX_TABLE, \
65 IWL_RATE_##ip##M_INDEX_TABLE }
66
67/*
68 * Parameter order:
69 * rate, prev ieee rate, next ieee rate, prev rs rate, next rs rate, prev tgg rs rate, next tgg rs rate
70 *
71 * If there isn't a valid next or previous rate then INV is used which
72 * maps to IWL_RATE_INVALID
73 *
74 */
75const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
76 IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
77 IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
78 IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
79 IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
80 IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
81 IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
82 IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
83 IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
84 IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
85 IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
86 IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
87 IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
88};
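
/*
 * Editor's illustration (hedged, not part of the patch): the 11 Mbps entry
 * above, IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), expands to roughly
 *
 *   [IWL_RATE_11M_INDEX] = { IWL_RATE_11M_PLCP, IWL_RATE_11M_IEEE,
 *                            IWL_RATE_9M_INDEX,  IWL_RATE_12M_INDEX,
 *                            IWL_RATE_5M_INDEX,  IWL_RATE_12M_INDEX,
 *                            IWL_RATE_5M_INDEX,  IWL_RATE_18M_INDEX,
 *                            IWL_RATE_11M_INDEX_TABLE,
 *                            IWL_RATE_9M_INDEX_TABLE },
 *
 * i.e. previous/next neighbours first in IEEE order, then in rate-scale
 * order, then in rate-scale order under TGG protection.
 */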
89
90static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
91{
92 u8 rate = iwl3945_rates[rate_index].prev_ieee;
93
94 if (rate == IWL_RATE_INVALID)
95 rate = rate_index;
96 return rate;
97}
98
99/* 1 = enable the iwl3945_disable_events() function */
100#define IWL_EVT_DISABLE (0)
101#define IWL_EVT_DISABLE_SIZE (1532/32)
102
103/**
104 * iwl3945_disable_events - Disable selected events in uCode event log
105 *
106 * Disable an event by writing "1"s into "disable"
107 * bitmap in SRAM. Bit position corresponds to Event # (id/type).
108 * Default values of 0 enable uCode events to be logged.
109 * Use only for special debugging. This function is just a placeholder as-is,
110 * you'll need to provide the special bits! ...
111 * ... and set IWL_EVT_DISABLE to 1. */
112void iwl3945_disable_events(struct iwl_priv *priv)
113{
114 int i;
115 u32 base; /* SRAM address of event log header */
116 u32 disable_ptr; /* SRAM address of event-disable bitmap array */
117 u32 array_size; /* # of u32 entries in array */
118 static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
119 0x00000000, /* 31 - 0 Event id numbers */
120 0x00000000, /* 63 - 32 */
121 0x00000000, /* 95 - 64 */
122 0x00000000, /* 127 - 96 */
123 0x00000000, /* 159 - 128 */
124 0x00000000, /* 191 - 160 */
125 0x00000000, /* 223 - 192 */
126 0x00000000, /* 255 - 224 */
127 0x00000000, /* 287 - 256 */
128 0x00000000, /* 319 - 288 */
129 0x00000000, /* 351 - 320 */
130 0x00000000, /* 383 - 352 */
131 0x00000000, /* 415 - 384 */
132 0x00000000, /* 447 - 416 */
133 0x00000000, /* 479 - 448 */
134 0x00000000, /* 511 - 480 */
135 0x00000000, /* 543 - 512 */
136 0x00000000, /* 575 - 544 */
137 0x00000000, /* 607 - 576 */
138 0x00000000, /* 639 - 608 */
139 0x00000000, /* 671 - 640 */
140 0x00000000, /* 703 - 672 */
141 0x00000000, /* 735 - 704 */
142 0x00000000, /* 767 - 736 */
143 0x00000000, /* 799 - 768 */
144 0x00000000, /* 831 - 800 */
145 0x00000000, /* 863 - 832 */
146 0x00000000, /* 895 - 864 */
147 0x00000000, /* 927 - 896 */
148 0x00000000, /* 959 - 928 */
149 0x00000000, /* 991 - 960 */
150 0x00000000, /* 1023 - 992 */
151 0x00000000, /* 1055 - 1024 */
152 0x00000000, /* 1087 - 1056 */
153 0x00000000, /* 1119 - 1088 */
154 0x00000000, /* 1151 - 1120 */
155 0x00000000, /* 1183 - 1152 */
156 0x00000000, /* 1215 - 1184 */
157 0x00000000, /* 1247 - 1216 */
158 0x00000000, /* 1279 - 1248 */
159 0x00000000, /* 1311 - 1280 */
160 0x00000000, /* 1343 - 1312 */
161 0x00000000, /* 1375 - 1344 */
162 0x00000000, /* 1407 - 1376 */
163 0x00000000, /* 1439 - 1408 */
164 0x00000000, /* 1471 - 1440 */
165 0x00000000, /* 1503 - 1472 */
166 };
167
168 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
169 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
170 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
171 return;
172 }
173
174 disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
175 array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
176
177 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
178 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
179 disable_ptr);
180 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
181 iwl_legacy_write_targ_mem(priv,
182 disable_ptr + (i * sizeof(u32)),
183 evt_disable[i]);
184
185 } else {
186 IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
187 IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
188 IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n",
189 disable_ptr, array_size);
190 }
191
192}
193
194static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
195{
196 int idx;
197
198 for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
199 if (iwl3945_rates[idx].plcp == plcp)
200 return idx;
201 return -1;
202}
203
204#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
205#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
206
207static const char *iwl3945_get_tx_fail_reason(u32 status)
208{
209 switch (status & TX_STATUS_MSK) {
210 case TX_3945_STATUS_SUCCESS:
211 return "SUCCESS";
212 TX_STATUS_ENTRY(SHORT_LIMIT);
213 TX_STATUS_ENTRY(LONG_LIMIT);
214 TX_STATUS_ENTRY(FIFO_UNDERRUN);
215 TX_STATUS_ENTRY(MGMNT_ABORT);
216 TX_STATUS_ENTRY(NEXT_FRAG);
217 TX_STATUS_ENTRY(LIFE_EXPIRE);
218 TX_STATUS_ENTRY(DEST_PS);
219 TX_STATUS_ENTRY(ABORTED);
220 TX_STATUS_ENTRY(BT_RETRY);
221 TX_STATUS_ENTRY(STA_INVALID);
222 TX_STATUS_ENTRY(FRAG_DROPPED);
223 TX_STATUS_ENTRY(TID_DISABLE);
224 TX_STATUS_ENTRY(FRAME_FLUSHED);
225 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
226 TX_STATUS_ENTRY(TX_LOCKED);
227 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
228 }
229
230 return "UNKNOWN";
231}
232#else
233static inline const char *iwl3945_get_tx_fail_reason(u32 status)
234{
235 return "";
236}
237#endif
238
239/*
240 * get ieee prev rate from rate scale table.
241 * for A and B modes we need to override the prev
242 * value
243 */
244int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
245{
246 int next_rate = iwl3945_get_prev_ieee_rate(rate);
247
248 switch (priv->band) {
249 case IEEE80211_BAND_5GHZ:
250 if (rate == IWL_RATE_12M_INDEX)
251 next_rate = IWL_RATE_9M_INDEX;
252 else if (rate == IWL_RATE_6M_INDEX)
253 next_rate = IWL_RATE_6M_INDEX;
254 break;
255 case IEEE80211_BAND_2GHZ:
256 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
257 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
258 if (rate == IWL_RATE_11M_INDEX)
259 next_rate = IWL_RATE_5M_INDEX;
260 }
261 break;
262
263 default:
264 break;
265 }
266
267 return next_rate;
268}
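
/*
 * Editor's note (hedged): on 5 GHz the generic "previous IEEE rate" step
 * from 12 Mbps would land on the CCK 11 Mbps entry, which does not exist on
 * that band -- hence the explicit 12M -> 9M override above -- and 6 Mbps
 * maps to itself, so the fallback chain bottoms out there.
 */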
269
270
271/**
272 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
273 *
274 * When FW advances 'R' index, all entries between old and new 'R' index
275 * need to be reclaimed. As a result, some free space becomes available. If there is
276 * enough free space (> low mark), wake the stack that feeds us.
277 */
278static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
279 int txq_id, int index)
280{
281 struct iwl_tx_queue *txq = &priv->txq[txq_id];
282 struct iwl_queue *q = &txq->q;
283 struct iwl_tx_info *tx_info;
284
285 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
286
287 for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
288 q->read_ptr != index;
289 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
290
291 tx_info = &txq->txb[txq->q.read_ptr];
292 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
293 tx_info->skb = NULL;
294 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
295 }
296
297 if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
298 (txq_id != IWL39_CMD_QUEUE_NUM) &&
299 priv->mac80211_registered)
300 iwl_legacy_wake_queue(priv, txq);
301}
302
303/**
304 * iwl3945_rx_reply_tx - Handle Tx response
305 */
306static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
307 struct iwl_rx_mem_buffer *rxb)
308{
309 struct iwl_rx_packet *pkt = rxb_addr(rxb);
310 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
311 int txq_id = SEQ_TO_QUEUE(sequence);
312 int index = SEQ_TO_INDEX(sequence);
313 struct iwl_tx_queue *txq = &priv->txq[txq_id];
314 struct ieee80211_tx_info *info;
315 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
316 u32 status = le32_to_cpu(tx_resp->status);
317 int rate_idx;
318 int fail;
319
320 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
321 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
322 "is out of range [0-%d] %d %d\n", txq_id,
323 index, txq->q.n_bd, txq->q.write_ptr,
324 txq->q.read_ptr);
325 return;
326 }
327
328 txq->time_stamp = jiffies;
329 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
330 ieee80211_tx_info_clear_status(info);
331
332 /* Fill the MRR chain with some info about on-chip retransmissions */
333 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
334 if (info->band == IEEE80211_BAND_5GHZ)
335 rate_idx -= IWL_FIRST_OFDM_RATE;
336
337 fail = tx_resp->failure_frame;
338
339 info->status.rates[0].idx = rate_idx;
340 info->status.rates[0].count = fail + 1; /* add final attempt */
341
342 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
343 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
344 IEEE80211_TX_STAT_ACK : 0;
345
346 IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
347 txq_id, iwl3945_get_tx_fail_reason(status), status,
348 tx_resp->rate, tx_resp->failure_frame);
349
350 IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
351 iwl3945_tx_queue_reclaim(priv, txq_id, index);
352
353 if (status & TX_ABORT_REQUIRED_MSK)
354 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
355}
356
357
358
359/*****************************************************************************
360 *
361 * Intel PRO/Wireless 3945ABG/BG Network Connection
362 *
363 * RX handler implementations
364 *
365 *****************************************************************************/
366#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
367static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
368 __le32 *stats)
369{
370 int i;
371 __le32 *prev_stats;
372 u32 *accum_stats;
373 u32 *delta, *max_delta;
374
375 prev_stats = (__le32 *)&priv->_3945.statistics;
376 accum_stats = (u32 *)&priv->_3945.accum_statistics;
377 delta = (u32 *)&priv->_3945.delta_statistics;
378 max_delta = (u32 *)&priv->_3945.max_delta;
379
380 for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
381 i += sizeof(__le32), stats++, prev_stats++, delta++,
382 max_delta++, accum_stats++) {
383 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
384 *delta = (le32_to_cpu(*stats) -
385 le32_to_cpu(*prev_stats));
386 *accum_stats += *delta;
387 if (*delta > *max_delta)
388 *max_delta = *delta;
389 }
390 }
391
392 /* reset accumulative statistics for "no-counter" type statistics */
393 priv->_3945.accum_statistics.general.temperature =
394 priv->_3945.statistics.general.temperature;
395 priv->_3945.accum_statistics.general.ttl_timestamp =
396 priv->_3945.statistics.general.ttl_timestamp;
397}
398#endif
399
400void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
401 struct iwl_rx_mem_buffer *rxb)
402{
403 struct iwl_rx_packet *pkt = rxb_addr(rxb);
404
405 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
406 (int)sizeof(struct iwl3945_notif_statistics),
407 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
408#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
409 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
410#endif
411 iwl_legacy_recover_from_statistics(priv, pkt);
412
413 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
414}
415
416void iwl3945_reply_statistics(struct iwl_priv *priv,
417 struct iwl_rx_mem_buffer *rxb)
418{
419 struct iwl_rx_packet *pkt = rxb_addr(rxb);
420 __le32 *flag = (__le32 *)&pkt->u.raw;
421
422 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
423#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
424 memset(&priv->_3945.accum_statistics, 0,
425 sizeof(struct iwl3945_notif_statistics));
426 memset(&priv->_3945.delta_statistics, 0,
427 sizeof(struct iwl3945_notif_statistics));
428 memset(&priv->_3945.max_delta, 0,
429 sizeof(struct iwl3945_notif_statistics));
430#endif
431 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
432 }
433 iwl3945_hw_rx_statistics(priv, rxb);
434}
435
436
437/******************************************************************************
438 *
439 * Misc. internal state and helper functions
440 *
441 ******************************************************************************/
442
443/* This is necessary only for a number of statistics, see the caller. */
444static int iwl3945_is_network_packet(struct iwl_priv *priv,
445 struct ieee80211_hdr *header)
446{
447 /* Filter incoming packets to determine if they are targeted toward
448 * this network, discarding packets coming from ourselves */
449 switch (priv->iw_mode) {
450 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
451 /* packets to our IBSS update information */
452 return !compare_ether_addr(header->addr3, priv->bssid);
453 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
454 /* packets from our AP (addr2 matches BSSID) update information */
455 return !compare_ether_addr(header->addr2, priv->bssid);
456 default:
457 return 1;
458 }
459}
460
461static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
462 struct iwl_rx_mem_buffer *rxb,
463 struct ieee80211_rx_status *stats)
464{
465 struct iwl_rx_packet *pkt = rxb_addr(rxb);
466 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
467 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
468 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
469 u16 len = le16_to_cpu(rx_hdr->len);
470 struct sk_buff *skb;
471 __le16 fc = hdr->frame_control;
472
473 /* Sanity check: the frame must fit within the Rx buffer page(s) */
474 if (unlikely(len + IWL39_RX_FRAME_SIZE >
475 PAGE_SIZE << priv->hw_params.rx_page_order)) {
476 IWL_DEBUG_DROP(priv, "Corruption detected!\n");
477 return;
478 }
479
480 /* We only process data packets if the interface is open */
481 if (unlikely(!priv->is_open)) {
482 IWL_DEBUG_DROP_LIMIT(priv,
483 "Dropping packet while interface is not open.\n");
484 return;
485 }
486
487 skb = dev_alloc_skb(128);
488 if (!skb) {
489 IWL_ERR(priv, "dev_alloc_skb failed\n");
490 return;
491 }
492
493 if (!iwl3945_mod_params.sw_crypto)
494 iwl_legacy_set_decrypted_flag(priv,
495 (struct ieee80211_hdr *)rxb_addr(rxb),
496 le32_to_cpu(rx_end->status), stats);
497
498 skb_add_rx_frag(skb, 0, rxb->page,
499 (void *)rx_hdr->payload - (void *)pkt, len);
500
501 iwl_legacy_update_stats(priv, false, fc, len);
502 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
503
504 ieee80211_rx(priv->hw, skb);
505 priv->alloc_rxb_page--;
506 rxb->page = NULL;
507}
508
509#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
510
511static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
512 struct iwl_rx_mem_buffer *rxb)
513{
514 struct ieee80211_hdr *header;
515 struct ieee80211_rx_status rx_status;
516 struct iwl_rx_packet *pkt = rxb_addr(rxb);
517 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
518 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
519 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
520 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
521 u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
522 u8 network_packet;
523
524 rx_status.flag = 0;
525 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
526 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
527 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
528 rx_status.freq =
529 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
530 rx_status.band);
531
532 rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
533 if (rx_status.band == IEEE80211_BAND_5GHZ)
534 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
535
536 rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
537 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
538
539 /* set the preamble flag if appropriate */
540 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
541 rx_status.flag |= RX_FLAG_SHORTPRE;
542
543 if ((unlikely(rx_stats->phy_count > 20))) {
544 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
545 rx_stats->phy_count);
546 return;
547 }
548
549 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
550 || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
551 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
552 return;
553 }
554
555
556
557 /* Convert 3945's rssi indicator to dBm */
558 rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
559
560 IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
561 rx_status.signal, rx_stats_sig_avg,
562 rx_stats_noise_diff);
563
564 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
565
566 network_packet = iwl3945_is_network_packet(priv, header);
567
568 IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
569 network_packet ? '*' : ' ',
570 le16_to_cpu(rx_hdr->channel),
571 rx_status.signal, rx_status.signal,
572 rx_status.rate_idx);
573
574 iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
575 header);
576
577 if (network_packet) {
578 priv->_3945.last_beacon_time =
579 le32_to_cpu(rx_end->beacon_timestamp);
580 priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
581 priv->_3945.last_rx_rssi = rx_status.signal;
582 }
583
584 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
585}
586
587int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
588 struct iwl_tx_queue *txq,
589 dma_addr_t addr, u16 len, u8 reset, u8 pad)
590{
591 int count;
592 struct iwl_queue *q;
593 struct iwl3945_tfd *tfd, *tfd_tmp;
594
595 q = &txq->q;
596 tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
597 tfd = &tfd_tmp[q->write_ptr];
598
599 if (reset)
600 memset(tfd, 0, sizeof(*tfd));
601
602 count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
603
604 if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
605 IWL_ERR(priv, "Error can not send more than %d chunks\n",
606 NUM_TFD_CHUNKS);
607 return -EINVAL;
608 }
609
610 tfd->tbs[count].addr = cpu_to_le32(addr);
611 tfd->tbs[count].len = cpu_to_le32(len);
612
613 count++;
614
615 tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
616 TFD_CTL_PAD_SET(pad));
617
618 return 0;
619}
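
/*
 * Editor's usage sketch (hypothetical, not taken from the patch): a caller
 * with two already-DMA-mapped chunks -- e.g. the Tx command and the frame
 * payload -- would attach them back to back.  Only the first call resets
 * the TFD; control_flags is rewritten on every call, so the pad value of
 * the last chunk is the one that sticks.
 */
static inline int iwl3945_sketch_build_tfd(struct iwl_priv *priv,
					   struct iwl_tx_queue *txq,
					   dma_addr_t cmd_phys, u16 cmd_len,
					   dma_addr_t buf_phys, u16 buf_len,
					   u8 pad)
{
	int ret;

	ret = iwl3945_hw_txq_attach_buf_to_tfd(priv, txq, cmd_phys, cmd_len,
					       1, 0);
	if (ret)
		return ret;
	return iwl3945_hw_txq_attach_buf_to_tfd(priv, txq, buf_phys, buf_len,
						0, pad);
}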
620
621/**
622 * iwl3945_hw_txq_free_tfd - Free one TFD, the one at index [txq->q.read_ptr]
623 *
624 * Does NOT advance any indexes
625 */
626void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
627{
628 struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
629 int index = txq->q.read_ptr;
630 struct iwl3945_tfd *tfd = &tfd_tmp[index];
631 struct pci_dev *dev = priv->pci_dev;
632 int i;
633 int counter;
634
635 /* sanity check */
636 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
637 if (counter > NUM_TFD_CHUNKS) {
638 IWL_ERR(priv, "Too many chunks: %i\n", counter);
639 /* @todo issue fatal error, it is quite a serious situation */
640 return;
641 }
642
643 /* Unmap tx_cmd */
644 if (counter)
645 pci_unmap_single(dev,
646 dma_unmap_addr(&txq->meta[index], mapping),
647 dma_unmap_len(&txq->meta[index], len),
648 PCI_DMA_TODEVICE);
649
650 /* unmap chunks if any */
651
652 for (i = 1; i < counter; i++)
653 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
654 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
655
656 /* free SKB */
657 if (txq->txb) {
658 struct sk_buff *skb;
659
660 skb = txq->txb[txq->q.read_ptr].skb;
661
662 /* can be called from irqs-disabled context */
663 if (skb) {
664 dev_kfree_skb_any(skb);
665 txq->txb[txq->q.read_ptr].skb = NULL;
666 }
667 }
668}
669
670/**
671 * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
672 *
673 */
674void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
675 struct iwl_device_cmd *cmd,
676 struct ieee80211_tx_info *info,
677 struct ieee80211_hdr *hdr,
678 int sta_id, int tx_id)
679{
680 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
681 u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
682 u16 rate_mask;
683 int rate;
684 u8 rts_retry_limit;
685 u8 data_retry_limit;
686 __le32 tx_flags;
687 __le16 fc = hdr->frame_control;
688 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
689
690 rate = iwl3945_rates[rate_index].plcp;
691 tx_flags = tx_cmd->tx_flags;
692
693 /* We need to figure out how to get the sta->supp_rates while
694 * in this running context */
695 rate_mask = IWL_RATES_MASK_3945;
696
697 /* Set retry limit on DATA packets and Probe Responses */
698 if (ieee80211_is_probe_resp(fc))
699 data_retry_limit = 3;
700 else
701 data_retry_limit = IWL_DEFAULT_TX_RETRY;
702 tx_cmd->data_retry_limit = data_retry_limit;
703
704 if (tx_id >= IWL39_CMD_QUEUE_NUM)
705 rts_retry_limit = 3;
706 else
707 rts_retry_limit = 7;
708
709 if (data_retry_limit < rts_retry_limit)
710 rts_retry_limit = data_retry_limit;
711 tx_cmd->rts_retry_limit = rts_retry_limit;
712
713 tx_cmd->rate = rate;
714 tx_cmd->tx_flags = tx_flags;
715
716 /* OFDM */
717 tx_cmd->supp_rates[0] =
718 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
719
720 /* CCK */
721 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
722
723 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
724 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
725 tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
726 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
727}
728
729static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
730{
731 unsigned long flags_spin;
732 struct iwl_station_entry *station;
733
734 if (sta_id == IWL_INVALID_STATION)
735 return IWL_INVALID_STATION;
736
737 spin_lock_irqsave(&priv->sta_lock, flags_spin);
738 station = &priv->stations[sta_id];
739
740 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
741 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
742 station->sta.mode = STA_CONTROL_MODIFY_MSK;
743 iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
744 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
745
746 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
747 sta_id, tx_rate);
748 return sta_id;
749}
750
751static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
752{
753/*
754 * (for documentation purposes)
755 * to set power to V_AUX, do
756
757 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
758 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
759 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
760 ~APMG_PS_CTRL_MSK_PWR_SRC);
761
762 iwl_poll_bit(priv, CSR_GPIO_IN,
763 CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
764 CSR_GPIO_IN_BIT_AUX_POWER, 5000);
765 }
766 */
767
768 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
769 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
770 ~APMG_PS_CTRL_MSK_PWR_SRC);
771
772 iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
773 CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
774}
775
776static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
777{
778 iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
779 iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
780 rxq->rb_stts_dma);
781 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
782 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
783 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
784 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
785 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
786 FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
787 (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
788 FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
789 (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
790 FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
791
792 /* fake read to flush all prev I/O */
793 iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
794
795 return 0;
796}
797
798static int iwl3945_tx_reset(struct iwl_priv *priv)
799{
800
801 /* bypass mode */
802 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
803
804 /* RA 0 is active */
805 iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
806
807 /* all 6 fifo are active */
808 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
809
810 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
811 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
812 iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
813 iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
814
815 iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
816 priv->_3945.shared_phys);
817
818 iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
819 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
820 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
821 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
822 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
823 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
824 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
825 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
826
827
828 return 0;
829}
830
831/**
832 * iwl3945_txq_ctx_reset - Reset TX queue context
833 *
834 * Destroys all DMA structures and initializes them again
835 */
836static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
837{
838 int rc;
839 int txq_id, slots_num;
840
841 iwl3945_hw_txq_ctx_free(priv);
842
843 /* allocate tx queue structure */
844 rc = iwl_legacy_alloc_txq_mem(priv);
845 if (rc)
846 return rc;
847
848 /* Tx CMD queue */
849 rc = iwl3945_tx_reset(priv);
850 if (rc)
851 goto error;
852
853 /* Tx queue(s) */
854 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
855 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
856 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
857 rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
858 slots_num, txq_id);
859 if (rc) {
860 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
861 goto error;
862 }
863 }
864
865 return rc;
866
867 error:
868 iwl3945_hw_txq_ctx_free(priv);
869 return rc;
870}
871
872
873/*
874 * Start up 3945's basic functionality after it has been reset
875 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
876 * NOTE: This does not load uCode nor start the embedded processor
877 */
878static int iwl3945_apm_init(struct iwl_priv *priv)
879{
880 int ret = iwl_legacy_apm_init(priv);
881
882 /* Clear APMG (NIC's internal power management) interrupts */
883 iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
884 iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
885
886 /* Reset radio chip */
887 iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
888 APMG_PS_CTRL_VAL_RESET_REQ);
889 udelay(5);
890 iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
891 APMG_PS_CTRL_VAL_RESET_REQ);
892
893 return ret;
894}
895
896static void iwl3945_nic_config(struct iwl_priv *priv)
897{
898 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
899 unsigned long flags;
900 u8 rev_id = priv->pci_dev->revision;
901
902 spin_lock_irqsave(&priv->lock, flags);
903
904 /* Determine HW type */
905 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
906
907 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
908 IWL_DEBUG_INFO(priv, "RTP type\n");
909 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
910 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
911 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
912 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
913 } else {
914 IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
915 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
916 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
917 }
918
919 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
920 IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
921 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
922 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
923 } else
924 IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
925
926 if ((eeprom->board_revision & 0xF0) == 0xD0) {
927 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
928 eeprom->board_revision);
929 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
930 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
931 } else {
932 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
933 eeprom->board_revision);
934 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
935 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
936 }
937
938 if (eeprom->almgor_m_version <= 1) {
939 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
940 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
941 IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
942 eeprom->almgor_m_version);
943 } else {
944 IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
945 eeprom->almgor_m_version);
946 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
947 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
948 }
949 spin_unlock_irqrestore(&priv->lock, flags);
950
951 if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
952 IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
953
954 if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
955 IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
956}
957
958int iwl3945_hw_nic_init(struct iwl_priv *priv)
959{
960 int rc;
961 unsigned long flags;
962 struct iwl_rx_queue *rxq = &priv->rxq;
963
964 spin_lock_irqsave(&priv->lock, flags);
965 priv->cfg->ops->lib->apm_ops.init(priv);
966 spin_unlock_irqrestore(&priv->lock, flags);
967
968 iwl3945_set_pwr_vmain(priv);
969
970 priv->cfg->ops->lib->apm_ops.config(priv);
971
972 /* Allocate the RX queue, or reset if it is already allocated */
973 if (!rxq->bd) {
974 rc = iwl_legacy_rx_queue_alloc(priv);
975 if (rc) {
976 IWL_ERR(priv, "Unable to initialize Rx queue\n");
977 return -ENOMEM;
978 }
979 } else
980 iwl3945_rx_queue_reset(priv, rxq);
981
982 iwl3945_rx_replenish(priv);
983
984 iwl3945_rx_init(priv, rxq);
985
986
987 /* Look at using this instead:
988 rxq->need_update = 1;
989 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
990 */
991
992 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
993
994 rc = iwl3945_txq_ctx_reset(priv);
995 if (rc)
996 return rc;
997
998 set_bit(STATUS_INIT, &priv->status);
999
1000 return 0;
1001}
1002
1003/**
1004 * iwl3945_hw_txq_ctx_free - Free TXQ Context
1005 *
1006 * Destroy all TX DMA queues and structures
1007 */
1008void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1009{
1010 int txq_id;
1011
1012 /* Tx queues */
1013 if (priv->txq)
1014 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1015 txq_id++)
1016 if (txq_id == IWL39_CMD_QUEUE_NUM)
1017 iwl_legacy_cmd_queue_free(priv);
1018 else
1019 iwl_legacy_tx_queue_free(priv, txq_id);
1020
1021 /* free tx queue structure */
1022 iwl_legacy_txq_mem(priv);
1023}
1024
1025void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1026{
1027 int txq_id;
1028
1029 /* stop SCD */
1030 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
1031 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
1032
1033 /* reset TFD queues */
1034 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
1035 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
1036 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
1037 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1038 1000);
1039 }
1040
1041 iwl3945_hw_txq_ctx_free(priv);
1042}
1043
1044/**
1045 * iwl3945_hw_reg_adjust_power_by_temp - return index delta into the
1046 * power gain settings table for the given temperature change
1047 */
1048static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1049{
1050 return (new_reading - old_reading) * (-11) / 100;
1051}
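A minimal user-space sketch of the arithmetic above, for illustration only (not part of the driver): the slope is -11 gain-table steps per 100 raw temperature units, and, per the power_gain_table comment below, each step is 1/2 dB with lower indices meaning higher gain, so a warmer reading yields a negative delta.

#include <stdio.h>

/* same formula as iwl3945_hw_reg_adjust_power_by_temp() above */
static int adjust_power_by_temp(int new_reading, int old_reading)
{
	return (new_reading - old_reading) * (-11) / 100;
}

int main(void)
{
	/* warming by 50 raw units: 50 * -11 / 100 = -5 steps (~2.5 dB) */
	printf("%d\n", adjust_power_by_temp(-200, -250));	/* -5 */

	/* cooling by 100 raw units: -100 * -11 / 100 = 11 steps (~5.5 dB) */
	printf("%d\n", adjust_power_by_temp(-250, -150));	/* 11 */
	return 0;
}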
1052
1053/**
1054 * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
1055 */
1056static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
1057{
1058 return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
1059}
1060
1061int iwl3945_hw_get_temperature(struct iwl_priv *priv)
1062{
1063 return iwl_read32(priv, CSR_UCODE_DRV_GP2);
1064}
1065
1066/**
1067 * iwl3945_hw_reg_txpower_get_temperature - get the current temperature
1068 * by reading from the NIC
1069 */
1070static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
1071{
1072 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1073 int temperature;
1074
1075 temperature = iwl3945_hw_get_temperature(priv);
1076
1077 /* driver's okay range is -260 to +25.
1078 * human readable okay range is 0 to +285 */
1079 IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
1080
1081 /* handle insane temp reading */
1082 if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
1083 IWL_ERR(priv, "Error bad temperature value %d\n", temperature);
1084
1085 /* if the last known temperature was very hot, substitute the
1086 * 3rd band/group's factory-measured temperature */
1087 if (priv->last_temperature > 100)
1088 temperature = eeprom->groups[2].temperature;
1089 else /* else use most recent "sane" value from driver */
1090 temperature = priv->last_temperature;
1091 }
1092
1093 return temperature; /* raw, not "human readable" */
1094}
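For illustration only, a standalone sketch of the raw vs. "human readable" conversion implied by the comment above (driver range -260..+25 maps to 0..+285); the +260 offset is assumed here to be what IWL_TEMP_CONVERT provides.

#include <assert.h>

#define TEMP_CONVERT_OFFSET 260	/* assumed stand-in for IWL_TEMP_CONVERT */

static int raw_to_readable(int raw_temp)
{
	return raw_temp + TEMP_CONVERT_OFFSET;
}

int main(void)
{
	assert(raw_to_readable(-260) == 0);	/* coldest "sane" reading */
	assert(raw_to_readable(25) == 285);	/* hottest "sane" reading */
	return 0;
}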
1095
1096/* Adjust Txpower only if temperature variance is greater than threshold.
1097 *
1098 * This threshold is lower than the 9 degrees used by older driver versions. */
1099#define IWL_TEMPERATURE_LIMIT_TIMER 6
1100
1101/**
1102 * iwl3945_is_temp_calib_needed - determines if new calibration is needed
1103 *
1104 * Records the new temperature in priv->temperature.
1105 * Replaces priv->last_temperature *only* if calibration is needed
1106 * (assumes the caller will actually do the calibration!). */
1107static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
1108{
1109 int temp_diff;
1110
1111 priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
1112 temp_diff = priv->temperature - priv->last_temperature;
1113
1114 /* get absolute value */
1115 if (temp_diff < 0) {
1116 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
1117 temp_diff = -temp_diff;
1118 } else if (temp_diff == 0)
1119 IWL_DEBUG_POWER(priv, "Same temp,\n");
1120 else
1121 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
1122
1123 /* if we don't need calibration, *don't* update last_temperature */
1124 if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
1125 IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
1126 return 0;
1127 }
1128
1129 IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
1130
1131 /* assume that caller will actually do calib ...
1132 * update the "last temperature" value */
1133 priv->last_temperature = priv->temperature;
1134 return 1;
1135}
1136
1137#define IWL_MAX_GAIN_ENTRIES 78
1138#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
1139#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
1140
1141/* radio and DSP power table, each step is 1/2 dB.
1142 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
1143static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
1144 {
1145 {251, 127}, /* 2.4 GHz, highest power */
1146 {251, 127},
1147 {251, 127},
1148 {251, 127},
1149 {251, 125},
1150 {251, 110},
1151 {251, 105},
1152 {251, 98},
1153 {187, 125},
1154 {187, 115},
1155 {187, 108},
1156 {187, 99},
1157 {243, 119},
1158 {243, 111},
1159 {243, 105},
1160 {243, 97},
1161 {243, 92},
1162 {211, 106},
1163 {211, 100},
1164 {179, 120},
1165 {179, 113},
1166 {179, 107},
1167 {147, 125},
1168 {147, 119},
1169 {147, 112},
1170 {147, 106},
1171 {147, 101},
1172 {147, 97},
1173 {147, 91},
1174 {115, 107},
1175 {235, 121},
1176 {235, 115},
1177 {235, 109},
1178 {203, 127},
1179 {203, 121},
1180 {203, 115},
1181 {203, 108},
1182 {203, 102},
1183 {203, 96},
1184 {203, 92},
1185 {171, 110},
1186 {171, 104},
1187 {171, 98},
1188 {139, 116},
1189 {227, 125},
1190 {227, 119},
1191 {227, 113},
1192 {227, 107},
1193 {227, 101},
1194 {227, 96},
1195 {195, 113},
1196 {195, 106},
1197 {195, 102},
1198 {195, 95},
1199 {163, 113},
1200 {163, 106},
1201 {163, 102},
1202 {163, 95},
1203 {131, 113},
1204 {131, 106},
1205 {131, 102},
1206 {131, 95},
1207 {99, 113},
1208 {99, 106},
1209 {99, 102},
1210 {99, 95},
1211 {67, 113},
1212 {67, 106},
1213 {67, 102},
1214 {67, 95},
1215 {35, 113},
1216 {35, 106},
1217 {35, 102},
1218 {35, 95},
1219 {3, 113},
1220 {3, 106},
1221 {3, 102},
1222 {3, 95} }, /* 2.4 GHz, lowest power */
1223 {
1224 {251, 127}, /* 5.x GHz, highest power */
1225 {251, 120},
1226 {251, 114},
1227 {219, 119},
1228 {219, 101},
1229 {187, 113},
1230 {187, 102},
1231 {155, 114},
1232 {155, 103},
1233 {123, 117},
1234 {123, 107},
1235 {123, 99},
1236 {123, 92},
1237 {91, 108},
1238 {59, 125},
1239 {59, 118},
1240 {59, 109},
1241 {59, 102},
1242 {59, 96},
1243 {59, 90},
1244 {27, 104},
1245 {27, 98},
1246 {27, 92},
1247 {115, 118},
1248 {115, 111},
1249 {115, 104},
1250 {83, 126},
1251 {83, 121},
1252 {83, 113},
1253 {83, 105},
1254 {83, 99},
1255 {51, 118},
1256 {51, 111},
1257 {51, 104},
1258 {51, 98},
1259 {19, 116},
1260 {19, 109},
1261 {19, 102},
1262 {19, 98},
1263 {19, 93},
1264 {171, 113},
1265 {171, 107},
1266 {171, 99},
1267 {139, 120},
1268 {139, 113},
1269 {139, 107},
1270 {139, 99},
1271 {107, 120},
1272 {107, 113},
1273 {107, 107},
1274 {107, 99},
1275 {75, 120},
1276 {75, 113},
1277 {75, 107},
1278 {75, 99},
1279 {43, 120},
1280 {43, 113},
1281 {43, 107},
1282 {43, 99},
1283 {11, 120},
1284 {11, 113},
1285 {11, 107},
1286 {11, 99},
1287 {131, 107},
1288 {131, 99},
1289 {99, 120},
1290 {99, 113},
1291 {99, 107},
1292 {99, 99},
1293 {67, 120},
1294 {67, 113},
1295 {67, 107},
1296 {67, 99},
1297 {35, 120},
1298 {35, 113},
1299 {35, 107},
1300 {35, 99},
1301 {3, 120} } /* 5.x GHz, lowest power */
1302};
1303
1304static inline u8 iwl3945_hw_reg_fix_power_index(int index)
1305{
1306 if (index < 0)
1307 return 0;
1308 if (index >= IWL_MAX_GAIN_ENTRIES)
1309 return IWL_MAX_GAIN_ENTRIES - 1;
1310 return (u8) index;
1311}
1312
1313/* Kick off thermal recalibration check every 60 seconds */
1314#define REG_RECALIB_PERIOD (60)
1315
1316/**
1317 * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
1318 *
1319 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1320 * or 6 Mbit (OFDM) rates.
1321 */
1322static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
1323 s32 rate_index, const s8 *clip_pwrs,
1324 struct iwl_channel_info *ch_info,
1325 int band_index)
1326{
1327 struct iwl3945_scan_power_info *scan_power_info;
1328 s8 power;
1329 u8 power_index;
1330
1331 scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
1332
1333 /* use this channel group's 6Mbit clipping/saturation pwr,
1334 * but cap at regulatory scan power restriction (set during init
1335 * based on eeprom channel data) for this channel. */
1336 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
1337
1338 power = min(power, priv->tx_power_user_lmt);
1339 scan_power_info->requested_power = power;
1340
1341 /* find difference between new scan *power* and current "normal"
1342 * Tx *power* for 6Mb. Use this difference (x2) to adjust the
1343 * current "normal" temperature-compensated Tx power *index* for
1344 * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
1345 * *index*. */
1346 power_index = ch_info->power_info[rate_index].power_table_index
1347 - (power - ch_info->power_info
1348 [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
1349
1350 /* store reference index that we use when adjusting *all* scan
1351 * powers. So we can accommodate user (all channel) or spectrum
1352 * management (single channel) power changes "between" temperature
1353 * feedback compensation procedures.
1354 * don't force fit this reference index into gain table; it may be a
1355 * negative number. This will help avoid errors when we're at
1356 * the lower bounds (highest gains, for warmest temperatures)
1357 * of the table. */
1358
1359 /* don't exceed table bounds for "real" setting */
1360 power_index = iwl3945_hw_reg_fix_power_index(power_index);
1361
1362 scan_power_info->power_table_index = power_index;
1363 scan_power_info->tpc.tx_gain =
1364 power_gain_table[band_index][power_index].tx_gain;
1365 scan_power_info->tpc.dsp_atten =
1366 power_gain_table[band_index][power_index].dsp_atten;
1367}
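For illustration only, a sketch of the "difference x2" index adjustment above: each gain-table step is 1/2 dB and lower indices mean more power, so requesting N dB less scan power than the normal 6M power moves the index 2*N entries toward the low-power end. All values below are hypothetical.

#include <stdio.h>

int main(void)
{
	int normal_6m_index = 40;	/* hypothetical current 6M table index */
	int normal_6m_power = 15;	/* hypothetical 6M requested power, dBm */
	int scan_power = 12;		/* capped scan power, dBm */

	/* 3 dB less power -> 6 entries deeper into the lower-power end */
	int scan_index = normal_6m_index - (scan_power - normal_6m_power) * 2;

	printf("scan index = %d\n", scan_index);	/* prints 46 */
	return 0;
}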
1368
1369/**
1370 * iwl3945_send_tx_power - fill in Tx Power command with gain settings
1371 *
1372 * Configures power settings for all rates for the current channel,
1373 * using values from the channel info struct, and sends them to the NIC
1374 */
1375static int iwl3945_send_tx_power(struct iwl_priv *priv)
1376{
1377 int rate_idx, i;
1378 const struct iwl_channel_info *ch_info = NULL;
1379 struct iwl3945_txpowertable_cmd txpower = {
1380 .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
1381 };
1382 u16 chan;
1383
1384 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1385 "TX Power requested while scanning!\n"))
1386 return -EAGAIN;
1387
1388 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1389
1390 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1391 ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
1392 if (!ch_info) {
1393 IWL_ERR(priv,
1394 "Failed to get channel info for channel %d [%d]\n",
1395 chan, priv->band);
1396 return -EINVAL;
1397 }
1398
1399 if (!iwl_legacy_is_channel_valid(ch_info)) {
1400 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
1401 "non-Tx channel.\n");
1402 return 0;
1403 }
1404
1405 /* fill cmd with power settings for all rates for current channel */
1406 /* Fill OFDM rate */
1407 for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
1408 rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
1409
1410 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1411 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
1412
1413 IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1414 le16_to_cpu(txpower.channel),
1415 txpower.band,
1416 txpower.power[i].tpc.tx_gain,
1417 txpower.power[i].tpc.dsp_atten,
1418 txpower.power[i].rate);
1419 }
1420 /* Fill CCK rates */
1421 for (rate_idx = IWL_FIRST_CCK_RATE;
1422 rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
1423 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1424 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
1425
1426 IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1427 le16_to_cpu(txpower.channel),
1428 txpower.band,
1429 txpower.power[i].tpc.tx_gain,
1430 txpower.power[i].tpc.dsp_atten,
1431 txpower.power[i].rate);
1432 }
1433
1434 return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
1435 sizeof(struct iwl3945_txpowertable_cmd),
1436 &txpower);
1437
1438}
1439
1440/**
1441 * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
1442 * @ch_info: Channel to update. Uses power_info.requested_power.
1443 *
1444 * Replace requested_power and base_power_index ch_info fields for
1445 * one channel.
1446 *
1447 * Called if user or spectrum management changes power preferences.
1448 * Takes into account h/w and modulation limitations (clip power).
1449 *
1450 * This does *not* send anything to NIC, just sets up ch_info for one channel.
1451 *
1452 * NOTE: iwl3945_hw_reg_comp_txpower_temp() *must* be run after this to
1453 * properly fill out the scan powers, and actual h/w gain settings,
1454 * and send changes to NIC
1455 */
1456static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
1457 struct iwl_channel_info *ch_info)
1458{
1459 struct iwl3945_channel_power_info *power_info;
1460 int power_changed = 0;
1461 int i;
1462 const s8 *clip_pwrs;
1463 int power;
1464
1465 /* Get this chnlgrp's rate-to-max/clip-powers table */
1466 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1467
1468 /* Get this channel's rate-to-current-power settings table */
1469 power_info = ch_info->power_info;
1470
1471 /* update OFDM Txpower settings */
1472 for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
1473 i++, ++power_info) {
1474 int delta_idx;
1475
1476 /* limit new power to be no more than h/w capability */
1477 power = min(ch_info->curr_txpow, clip_pwrs[i]);
1478 if (power == power_info->requested_power)
1479 continue;
1480
1481 /* find difference between old and new requested powers,
1482 * update base (non-temp-compensated) power index */
1483 delta_idx = (power - power_info->requested_power) * 2;
1484 power_info->base_power_index -= delta_idx;
1485
1486 /* save new requested power value */
1487 power_info->requested_power = power;
1488
1489 power_changed = 1;
1490 }
1491
1492 /* update CCK Txpower settings, based on OFDM 12M setting ...
1493 * ... all CCK power settings for a given channel are the *same*. */
1494 if (power_changed) {
1495 power =
1496 ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
1497 requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
1498
1499 /* do all CCK rates' iwl3945_channel_power_info structures */
1500 for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
1501 power_info->requested_power = power;
1502 power_info->base_power_index =
1503 ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
1504 base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
1505 ++power_info;
1506 }
1507 }
1508
1509 return 0;
1510}
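For illustration only, a sketch showing that the CCK-from-OFDM offsets used above are self-consistent with the 1/2 dB-per-step gain table: a -5 dB power difference corresponds to +10 index steps. The 12M power and index values below are hypothetical.

#include <assert.h>

#define CCK_FROM_OFDM_POWER_DIFF	(-5)	/* as defined above */
#define CCK_FROM_OFDM_INDEX_DIFF	10	/* as defined above */

int main(void)
{
	int ofdm_12m_power = 14;	/* hypothetical 12M requested power, dBm */
	int ofdm_12m_base_index = 30;	/* hypothetical 12M base index */

	int cck_power = ofdm_12m_power + CCK_FROM_OFDM_POWER_DIFF;	/* 9 dBm */
	int cck_index = ofdm_12m_base_index + CCK_FROM_OFDM_INDEX_DIFF; /* 40 */

	/* -5 dB at 1/2 dB per index step == +10 steps */
	assert((ofdm_12m_power - cck_power) * 2 == CCK_FROM_OFDM_INDEX_DIFF);
	assert(cck_power == 9 && cck_index == 40);
	return 0;
}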
1511
1512/**
1513 * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1514 *
1515 * NOTE: Returned power limit may be less (but not more) than requested,
1516 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1517 * (no consideration for h/w clipping limitations).
1518 */
1519static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
1520{
1521 s8 max_power;
1522
1523#if 0
1524 /* if we're using TGd limits, use lower of TGd or EEPROM */
1525 if (ch_info->tgd_data.max_power != 0)
1526 max_power = min(ch_info->tgd_data.max_power,
1527 ch_info->eeprom.max_power_avg);
1528
1529 /* else just use EEPROM limits */
1530 else
1531#endif
1532 max_power = ch_info->eeprom.max_power_avg;
1533
1534 return min(max_power, ch_info->max_power_avg);
1535}
1536
1537/**
1538 * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
1539 *
1540 * Compensate txpower settings of *all* channels for temperature.
1541 * This only accounts for the difference between current temperature
1542 * and the factory calibration temperatures, and bases the new settings
1543 * on the channel's base_power_index.
1544 *
1545 * If RxOn is "associated", this sends the new Txpower to NIC!
1546 */
1547static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1548{
1549 struct iwl_channel_info *ch_info = NULL;
1550 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1551 int delta_index;
1552 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1553 u8 a_band;
1554 u8 rate_index;
1555 u8 scan_tbl_index;
1556 u8 i;
1557 int ref_temp;
1558 int temperature = priv->temperature;
1559
1560 if (priv->disable_tx_power_cal ||
1561 test_bit(STATUS_SCANNING, &priv->status)) {
1562 /* do not perform tx power calibration */
1563 return 0;
1564 }
1565 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1566 for (i = 0; i < priv->channel_count; i++) {
1567 ch_info = &priv->channel_info[i];
1568 a_band = iwl_legacy_is_channel_a_band(ch_info);
1569
1570 /* Get this chnlgrp's factory calibration temperature */
1571 ref_temp = (s16)eeprom->groups[ch_info->group_index].
1572 temperature;
1573
1574 /* get power index adjustment based on current and factory
1575 * temps */
1576 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
1577 ref_temp);
1578
1579 /* set tx power value for all rates, OFDM and CCK */
1580 for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
1581 rate_index++) {
1582 int power_idx =
1583 ch_info->power_info[rate_index].base_power_index;
1584
1585 /* temperature compensate */
1586 power_idx += delta_index;
1587
1588 /* stay within table range */
1589 power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
1590 ch_info->power_info[rate_index].
1591 power_table_index = (u8) power_idx;
1592 ch_info->power_info[rate_index].tpc =
1593 power_gain_table[a_band][power_idx];
1594 }
1595
1596 /* Get this chnlgrp's rate-to-max/clip-powers table */
1597 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1598
1599 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1600 for (scan_tbl_index = 0;
1601 scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
1602 s32 actual_index = (scan_tbl_index == 0) ?
1603 IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
1604 iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
1605 actual_index, clip_pwrs,
1606 ch_info, a_band);
1607 }
1608 }
1609
1610 /* send Txpower command for current channel to ucode */
1611 return priv->cfg->ops->lib->send_tx_power(priv);
1612}
1613
1614int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1615{
1616 struct iwl_channel_info *ch_info;
1617 s8 max_power;
1618 u8 a_band;
1619 u8 i;
1620
1621 if (priv->tx_power_user_lmt == power) {
1622 IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
1623 "limit: %ddBm.\n", power);
1624 return 0;
1625 }
1626
1627 IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
1628 priv->tx_power_user_lmt = power;
1629
1630 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1631
1632 for (i = 0; i < priv->channel_count; i++) {
1633 ch_info = &priv->channel_info[i];
1634 a_band = iwl_legacy_is_channel_a_band(ch_info);
1635
1636 /* find minimum power of all user and regulatory constraints
1637 * (does not consider h/w clipping limitations) */
1638 max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
1639 max_power = min(power, max_power);
1640 if (max_power != ch_info->curr_txpow) {
1641 ch_info->curr_txpow = max_power;
1642
1643 /* this considers the h/w clipping limitations */
1644 iwl3945_hw_reg_set_new_power(priv, ch_info);
1645 }
1646 }
1647
1648 /* update txpower settings for all channels,
1649 * send to NIC if associated. */
1650 iwl3945_is_temp_calib_needed(priv);
1651 iwl3945_hw_reg_comp_txpower_temp(priv);
1652
1653 return 0;
1654}
1655
1656static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1657 struct iwl_rxon_context *ctx)
1658{
1659 int rc = 0;
1660 struct iwl_rx_packet *pkt;
1661 struct iwl3945_rxon_assoc_cmd rxon_assoc;
1662 struct iwl_host_cmd cmd = {
1663 .id = REPLY_RXON_ASSOC,
1664 .len = sizeof(rxon_assoc),
1665 .flags = CMD_WANT_SKB,
1666 .data = &rxon_assoc,
1667 };
1668 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1669 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1670
1671 if ((rxon1->flags == rxon2->flags) &&
1672 (rxon1->filter_flags == rxon2->filter_flags) &&
1673 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1674 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1675 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1676 return 0;
1677 }
1678
1679 rxon_assoc.flags = ctx->staging.flags;
1680 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1681 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1682 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1683 rxon_assoc.reserved = 0;
1684
1685 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
1686 if (rc)
1687 return rc;
1688
1689 pkt = (struct iwl_rx_packet *)cmd.reply_page;
1690 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
1691 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
1692 rc = -EIO;
1693 }
1694
1695 iwl_legacy_free_pages(priv, cmd.reply_page);
1696
1697 return rc;
1698}
1699
1700/**
1701 * iwl3945_commit_rxon - commit staging_rxon to hardware
1702 *
1703 * The RXON command in staging_rxon is committed to the hardware and
1704 * the active_rxon structure is updated with the new data. This
1705 * function correctly transitions out of the RXON_ASSOC_MSK state if
1706 * a HW tune is required based on the RXON structure changes.
1707 */
1708int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1709{
1710 /* cast away the const for active_rxon in this function */
1711 struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
1712 struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
1713 int rc = 0;
1714 bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
1715
1716 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1717 return -EINVAL;
1718
1719 if (!iwl_legacy_is_alive(priv))
1720 return -1;
1721
1722 /* always get timestamp with Rx frame */
1723 staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
1724
1725 /* select antenna */
1726 staging_rxon->flags &=
1727 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1728 staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
1729
1730 rc = iwl_legacy_check_rxon_cmd(priv, ctx);
1731 if (rc) {
1732 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1733 return -EINVAL;
1734 }
1735
1736 /* If we don't need to send a full RXON, we can use
1737 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
1738 * and other flags for the current radio configuration. */
1739 if (!iwl_legacy_full_rxon_required(priv,
1740 &priv->contexts[IWL_RXON_CTX_BSS])) {
1741 rc = iwl_legacy_send_rxon_assoc(priv,
1742 &priv->contexts[IWL_RXON_CTX_BSS]);
1743 if (rc) {
1744 IWL_ERR(priv, "Error setting RXON_ASSOC "
1745 "configuration (%d).\n", rc);
1746 return rc;
1747 }
1748
1749 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1750
1751 return 0;
1752 }
1753
1754 /* If we are currently associated and the new config requires
1755 * a full RXON and wants the associated mask enabled, we must
1756 * clear the associated bit from the active configuration
1757 * before we apply the new config */
1758 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
1759 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1760 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1761
1762 /*
1763 * reserved4 and 5 could have been filled by the iwlcore code.
1764 * Let's clear them before pushing to the 3945.
1765 */
1766 active_rxon->reserved4 = 0;
1767 active_rxon->reserved5 = 0;
1768 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1769 sizeof(struct iwl3945_rxon_cmd),
1770 &priv->contexts[IWL_RXON_CTX_BSS].active);
1771
1772 /* If the mask clearing failed then we set
1773 * active_rxon back to what it was previously */
1774 if (rc) {
1775 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1776 IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
1777 "configuration (%d).\n", rc);
1778 return rc;
1779 }
1780 iwl_legacy_clear_ucode_stations(priv,
1781 &priv->contexts[IWL_RXON_CTX_BSS]);
1782 iwl_legacy_restore_stations(priv,
1783 &priv->contexts[IWL_RXON_CTX_BSS]);
1784 }
1785
1786 IWL_DEBUG_INFO(priv, "Sending RXON\n"
1787 "* with%s RXON_FILTER_ASSOC_MSK\n"
1788 "* channel = %d\n"
1789 "* bssid = %pM\n",
1790 (new_assoc ? "" : "out"),
1791 le16_to_cpu(staging_rxon->channel),
1792 staging_rxon->bssid_addr);
1793
1794 /*
1795 * reserved4 and 5 could have been filled by the iwlcore code.
1796 * Let's clear them before pushing to the 3945.
1797 */
1798 staging_rxon->reserved4 = 0;
1799 staging_rxon->reserved5 = 0;
1800
1801 iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
1802
1803 /* Apply the new configuration */
1804 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1805 sizeof(struct iwl3945_rxon_cmd),
1806 staging_rxon);
1807 if (rc) {
1808 IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
1809 return rc;
1810 }
1811
1812 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1813
1814 if (!new_assoc) {
1815 iwl_legacy_clear_ucode_stations(priv,
1816 &priv->contexts[IWL_RXON_CTX_BSS]);
1817 iwl_legacy_restore_stations(priv,
1818 &priv->contexts[IWL_RXON_CTX_BSS]);
1819 }
1820
1821 /* If we issue a new RXON command which requires a tune, then we must
1822 * send a new TXPOWER command or we won't be able to Tx any frames */
1823 rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1824 if (rc) {
1825 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
1826 return rc;
1827 }
1828
1829 /* Init the hardware's rate fallback order based on the band */
1830 rc = iwl3945_init_hw_rate_table(priv);
1831 if (rc) {
1832 IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
1833 return -EIO;
1834 }
1835
1836 return 0;
1837}
1838
1839/**
1840 * iwl3945_reg_txpower_periodic - called when time to check our temperature.
1841 *
1842 * -- reset periodic timer
1843 * -- see if temp has changed enough to warrant re-calibration ... if so:
1844 * -- correct coeffs for temp (can reset temp timer)
1845 * -- save this temp as "last",
1846 * -- send new set of gain settings to NIC
1847 * NOTE: This should continue working, even when we're not associated,
1848 * so we can keep our internal table of scan powers current. */
1849void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1850{
1851 /* This will kick in the "brute force"
1852 * iwl3945_hw_reg_comp_txpower_temp() below */
1853 if (!iwl3945_is_temp_calib_needed(priv))
1854 goto reschedule;
1855
1856 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1857 * This is based *only* on current temperature,
1858 * ignoring any previous power measurements */
1859 iwl3945_hw_reg_comp_txpower_temp(priv);
1860
1861 reschedule:
1862 queue_delayed_work(priv->workqueue,
1863 &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
1864}
1865
1866static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
1867{
1868 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1869 _3945.thermal_periodic.work);
1870
1871 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1872 return;
1873
1874 mutex_lock(&priv->mutex);
1875 iwl3945_reg_txpower_periodic(priv);
1876 mutex_unlock(&priv->mutex);
1877}
1878
1879/**
1880 * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
1881 * for the channel.
1882 *
1883 * This function is used when initializing channel-info structs.
1884 *
1885 * NOTE: These channel groups do *NOT* match the bands above!
1886 * These channel groups are based on factory-tested channels;
1887 * on A-band, EEPROM's "group frequency" entries represent the top
1888 * channel in each of groups 1-4. All B/G channels are in group 0.
1889 */
1890static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
1891 const struct iwl_channel_info *ch_info)
1892{
1893 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1894 struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1895 u8 group;
1896 u16 group_index = 0; /* based on factory calib frequencies */
1897 u8 grp_channel;
1898
1899 /* Find the group index for the channel ... don't use index 1(?) */
1900 if (iwl_legacy_is_channel_a_band(ch_info)) {
1901 for (group = 1; group < 5; group++) {
1902 grp_channel = ch_grp[group].group_channel;
1903 if (ch_info->channel <= grp_channel) {
1904 group_index = group;
1905 break;
1906 }
1907 }
1908 /* group 4 has a few channels *above* its factory cal freq */
1909 if (group == 5)
1910 group_index = 4;
1911 } else
1912 group_index = 0; /* 2.4 GHz, group 0 */
1913
1914 IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
1915 group_index);
1916 return group_index;
1917}
1918
1919/**
1920 * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
1921 *
1922 * Interpolate to get nominal (i.e. at factory calibration temperature) index
1923 * into radio/DSP gain settings table for requested power.
1924 */
1925static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
1926 s8 requested_power,
1927 s32 setting_index, s32 *new_index)
1928{
1929 const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
1930 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1931 s32 index0, index1;
1932 s32 power = 2 * requested_power;
1933 s32 i;
1934 const struct iwl3945_eeprom_txpower_sample *samples;
1935 s32 gains0, gains1;
1936 s32 res;
1937 s32 denominator;
1938
1939 chnl_grp = &eeprom->groups[setting_index];
1940 samples = chnl_grp->samples;
1941 for (i = 0; i < 5; i++) {
1942 if (power == samples[i].power) {
1943 *new_index = samples[i].gain_index;
1944 return 0;
1945 }
1946 }
1947
1948 if (power > samples[1].power) {
1949 index0 = 0;
1950 index1 = 1;
1951 } else if (power > samples[2].power) {
1952 index0 = 1;
1953 index1 = 2;
1954 } else if (power > samples[3].power) {
1955 index0 = 2;
1956 index1 = 3;
1957 } else {
1958 index0 = 3;
1959 index1 = 4;
1960 }
1961
1962 denominator = (s32) samples[index1].power - (s32) samples[index0].power;
1963 if (denominator == 0)
1964 return -EINVAL;
1965 gains0 = (s32) samples[index0].gain_index * (1 << 19);
1966 gains1 = (s32) samples[index1].gain_index * (1 << 19);
1967 res = gains0 + (gains1 - gains0) *
1968 ((s32) power - (s32) samples[index0].power) / denominator +
1969 (1 << 18);
1970 *new_index = res >> 19;
1971 return 0;
1972}
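For illustration only, a standalone sketch of the Q19 fixed-point interpolation above: gain indices are scaled by 2^19, and half a unit (2^18) is added so the final right shift rounds to nearest instead of truncating. The sample powers (half-dBm) and gain indices below are hypothetical.

#include <stdio.h>

int main(void)
{
	/* two neighbouring factory samples: (power in half-dBm, gain index) */
	int p0 = 24, g0 = 48;
	int p1 = 18, g1 = 59;
	int power = 21;			/* requested power, half-dBm units */

	long gains0 = (long)g0 * (1 << 19);
	long gains1 = (long)g1 * (1 << 19);
	long res = gains0 + (gains1 - gains0) * (power - p0) / (p1 - p0)
			  + (1 << 18);

	/* exact interpolation is 53.5; the +2^18 rounds it up to 54 */
	printf("interpolated index = %ld\n", res >> 19);	/* 54 */
	return 0;
}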
1973
1974static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
1975{
1976 u32 i;
1977 s32 rate_index;
1978 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1979 const struct iwl3945_eeprom_txpower_group *group;
1980
1981 IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
1982
1983 for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
1984 s8 *clip_pwrs; /* table of power levels for each rate */
1985 s8 satur_pwr; /* saturation power for each chnl group */
1986 group = &eeprom->groups[i];
1987
1988 /* sanity check on factory saturation power value */
1989 if (group->saturation_power < 40) {
1990 IWL_WARN(priv, "Error: saturation power is %d, "
1991 "less than minimum expected 40\n",
1992 group->saturation_power);
1993 return;
1994 }
1995
1996 /*
1997 * Derive requested power levels for each rate, based on
1998 * hardware capabilities (saturation power for band).
1999 * Basic value is 3dB down from saturation, with further
2000 * power reductions for highest 3 data rates. These
2001 * backoffs provide headroom for high rate modulation
2002 * power peaks, without too much distortion (clipping).
2003 */
2004 /* we'll fill in this array with h/w max power levels */
2005 clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
2006
2007 /* divide factory saturation power by 2 to find -3dB level */
2008 satur_pwr = (s8) (group->saturation_power >> 1);
2009
2010 /* fill in channel group's nominal powers for each rate */
2011 for (rate_index = 0;
2012 rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
2013 switch (rate_index) {
2014 case IWL_RATE_36M_INDEX_TABLE:
2015 if (i == 0) /* B/G */
2016 *clip_pwrs = satur_pwr;
2017 else /* A */
2018 *clip_pwrs = satur_pwr - 5;
2019 break;
2020 case IWL_RATE_48M_INDEX_TABLE:
2021 if (i == 0)
2022 *clip_pwrs = satur_pwr - 7;
2023 else
2024 *clip_pwrs = satur_pwr - 10;
2025 break;
2026 case IWL_RATE_54M_INDEX_TABLE:
2027 if (i == 0)
2028 *clip_pwrs = satur_pwr - 9;
2029 else
2030 *clip_pwrs = satur_pwr - 12;
2031 break;
2032 default:
2033 *clip_pwrs = satur_pwr;
2034 break;
2035 }
2036 }
2037 }
2038}
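For illustration only, the clip powers the loop above would derive for a hypothetical channel group with a factory saturation_power of 60: satur_pwr is 30 after the >>1, the top three OFDM rates get the extra back-offs from the switch statement, and all other rates stay at satur_pwr.

#include <stdio.h>

int main(void)
{
	int satur_pwr = 60 >> 1;	/* hypothetical saturation_power of 60 */

	/* B/G group (i == 0): 36M = 30, 48M = 23, 54M = 21, others = 30 */
	printf("B/G: 36M=%d 48M=%d 54M=%d\n",
	       satur_pwr, satur_pwr - 7, satur_pwr - 9);

	/* A groups (i > 0):  36M = 25, 48M = 20, 54M = 18, others = 30 */
	printf("A:   36M=%d 48M=%d 54M=%d\n",
	       satur_pwr - 5, satur_pwr - 10, satur_pwr - 12);
	return 0;
}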
2039
2040/**
2041 * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
2042 *
2043 * Second pass (during init) to set up priv->channel_info
2044 *
2045 * Set up Tx-power settings in our channel info database for each VALID
2046 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
2047 * and current temperature.
2048 *
2049 * Since this is based on current temperature (at init time), these values may
2050 * not be valid for very long, but it gives us a starting/default point,
2051 * and allows us to perform an active (i.e. Tx-based) scan.
2052 *
2053 * This does *not* write values to NIC, just sets up our internal table.
2054 */
2055int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
2056{
2057 struct iwl_channel_info *ch_info = NULL;
2058 struct iwl3945_channel_power_info *pwr_info;
2059 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
2060 int delta_index;
2061 u8 rate_index;
2062 u8 scan_tbl_index;
2063 const s8 *clip_pwrs; /* array of power levels for each rate */
2064 u8 gain, dsp_atten;
2065 s8 power;
2066 u8 pwr_index, base_pwr_index, a_band;
2067 u8 i;
2068 int temperature;
2069
2070 /* save temperature reference,
2071 * so we can determine next time to calibrate */
2072 temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
2073 priv->last_temperature = temperature;
2074
2075 iwl3945_hw_reg_init_channel_groups(priv);
2076
2077 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2078 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
2079 i++, ch_info++) {
2080 a_band = iwl_legacy_is_channel_a_band(ch_info);
2081 if (!iwl_legacy_is_channel_valid(ch_info))
2082 continue;
2083
2084 /* find this channel's channel group (*not* "band") index */
2085 ch_info->group_index =
2086 iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
2087
2088 /* Get this chnlgrp's rate->max/clip-powers table */
2089 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
2090
2091 /* calculate power index *adjustment* value according to
2092 * diff between current temperature and factory temperature */
2093 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
2094 eeprom->groups[ch_info->group_index].
2095 temperature);
2096
2097 IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
2098 ch_info->channel, delta_index, temperature +
2099 IWL_TEMP_CONVERT);
2100
2101 /* set tx power value for all OFDM rates */
2102 for (rate_index = 0; rate_index < IWL_OFDM_RATES;
2103 rate_index++) {
2104 s32 uninitialized_var(power_idx);
2105 int rc;
2106
2107 /* use channel group's clip-power table,
2108 * but don't exceed channel's max power */
2109 s8 pwr = min(ch_info->max_power_avg,
2110 clip_pwrs[rate_index]);
2111
2112 pwr_info = &ch_info->power_info[rate_index];
2113
2114 /* get base (i.e. at factory-measured temperature)
2115 * power table index for this rate's power */
2116 rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
2117 ch_info->group_index,
2118 &power_idx);
2119 if (rc) {
2120 IWL_ERR(priv, "Invalid power index\n");
2121 return rc;
2122 }
2123 pwr_info->base_power_index = (u8) power_idx;
2124
2125 /* temperature compensate */
2126 power_idx += delta_index;
2127
2128 /* stay within range of gain table */
2129 power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
2130
2131 /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
2132 pwr_info->requested_power = pwr;
2133 pwr_info->power_table_index = (u8) power_idx;
2134 pwr_info->tpc.tx_gain =
2135 power_gain_table[a_band][power_idx].tx_gain;
2136 pwr_info->tpc.dsp_atten =
2137 power_gain_table[a_band][power_idx].dsp_atten;
2138 }
2139
2140 /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
2141 pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
2142 power = pwr_info->requested_power +
2143 IWL_CCK_FROM_OFDM_POWER_DIFF;
2144 pwr_index = pwr_info->power_table_index +
2145 IWL_CCK_FROM_OFDM_INDEX_DIFF;
2146 base_pwr_index = pwr_info->base_power_index +
2147 IWL_CCK_FROM_OFDM_INDEX_DIFF;
2148
2149 /* stay within table range */
2150 pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
2151 gain = power_gain_table[a_band][pwr_index].tx_gain;
2152 dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
2153
2154 /* fill each CCK rate's iwl3945_channel_power_info structure
2155 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
2156 * NOTE: CCK rates start at end of OFDM rates! */
2157 for (rate_index = 0;
2158 rate_index < IWL_CCK_RATES; rate_index++) {
2159 pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
2160 pwr_info->requested_power = power;
2161 pwr_info->power_table_index = pwr_index;
2162 pwr_info->base_power_index = base_pwr_index;
2163 pwr_info->tpc.tx_gain = gain;
2164 pwr_info->tpc.dsp_atten = dsp_atten;
2165 }
2166
2167 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
2168 for (scan_tbl_index = 0;
2169 scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
2170 s32 actual_index = (scan_tbl_index == 0) ?
2171 IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
2172 iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
2173 actual_index, clip_pwrs, ch_info, a_band);
2174 }
2175 }
2176
2177 return 0;
2178}
2179
2180int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
2181{
2182 int rc;
2183
2184 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
2185 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
2186 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2187 if (rc < 0)
2188 IWL_ERR(priv, "Can't stop Rx DMA.\n");
2189
2190 return 0;
2191}
2192
2193int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2194{
2195 int txq_id = txq->q.id;
2196
2197 struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
2198
2199 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
2200
2201 iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
2202 iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
2203
2204 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
2205 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2206 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2207 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
2208 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
2209 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
2210
2211 /* fake read to flush all prev. writes */
2212 iwl_read32(priv, FH39_TSSR_CBB_BASE);
2213
2214 return 0;
2215}
2216
2217/*
2218 * HCMD utils
2219 */
2220static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
2221{
2222 switch (cmd_id) {
2223 case REPLY_RXON:
2224 return sizeof(struct iwl3945_rxon_cmd);
2225 case POWER_TABLE_CMD:
2226 return sizeof(struct iwl3945_powertable_cmd);
2227 default:
2228 return len;
2229 }
2230}
2231
2232
2233static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
2234 u8 *data)
2235{
2236 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
2237 addsta->mode = cmd->mode;
2238 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2239 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
2240 addsta->station_flags = cmd->station_flags;
2241 addsta->station_flags_msk = cmd->station_flags_msk;
2242 addsta->tid_disable_tx = cpu_to_le16(0);
2243 addsta->rate_n_flags = cmd->rate_n_flags;
2244 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2245 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2246 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2247
2248 return (u16)sizeof(struct iwl3945_addsta_cmd);
2249}
2250
2251static int iwl3945_add_bssid_station(struct iwl_priv *priv,
2252 const u8 *addr, u8 *sta_id_r)
2253{
2254 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2255 int ret;
2256 u8 sta_id;
2257 unsigned long flags;
2258
2259 if (sta_id_r)
2260 *sta_id_r = IWL_INVALID_STATION;
2261
2262 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
2263 if (ret) {
2264 IWL_ERR(priv, "Unable to add station %pM\n", addr);
2265 return ret;
2266 }
2267
2268 if (sta_id_r)
2269 *sta_id_r = sta_id;
2270
2271 spin_lock_irqsave(&priv->sta_lock, flags);
2272 priv->stations[sta_id].used |= IWL_STA_LOCAL;
2273 spin_unlock_irqrestore(&priv->sta_lock, flags);
2274
2275 return 0;
2276}
2277static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2278 struct ieee80211_vif *vif, bool add)
2279{
2280 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2281 int ret;
2282
2283 if (add) {
2284 ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
2285 &vif_priv->ibss_bssid_sta_id);
2286 if (ret)
2287 return ret;
2288
2289 iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
2290 (priv->band == IEEE80211_BAND_5GHZ) ?
2291 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
2292 iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
2293
2294 return 0;
2295 }
2296
2297 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
2298 vif->bss_conf.bssid);
2299}
2300
2301/**
2302 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
2303 */
2304int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2305{
2306 int rc, i, index, prev_index;
2307 struct iwl3945_rate_scaling_cmd rate_cmd = {
2308 .reserved = {0, 0, 0},
2309 };
2310 struct iwl3945_rate_scaling_info *table = rate_cmd.table;
2311
2312 for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
2313 index = iwl3945_rates[i].table_rs_index;
2314
2315 table[index].rate_n_flags =
2316 iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
2317 table[index].try_cnt = priv->retry_rate;
2318 prev_index = iwl3945_get_prev_ieee_rate(i);
2319 table[index].next_rate_index =
2320 iwl3945_rates[prev_index].table_rs_index;
2321 }
2322
2323 switch (priv->band) {
2324 case IEEE80211_BAND_5GHZ:
2325 IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
2326 /* If one of the following CCK rates is used,
2327 * have it fall back to the 6M OFDM rate */
2328 for (i = IWL_RATE_1M_INDEX_TABLE;
2329 i <= IWL_RATE_11M_INDEX_TABLE; i++)
2330 table[i].next_rate_index =
2331 iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
2332
2333 /* Don't fall back to CCK rates */
2334 table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
2335 IWL_RATE_9M_INDEX_TABLE;
2336
2337 /* Don't drop out of OFDM rates */
2338 table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
2339 iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
2340 break;
2341
2342 case IEEE80211_BAND_2GHZ:
2343 IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
2344 /* If an OFDM rate is used, have it fall back to the
2345 * 1M CCK rate */
2346
2347 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2348 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2349
2350 index = IWL_FIRST_CCK_RATE;
2351 for (i = IWL_RATE_6M_INDEX_TABLE;
2352 i <= IWL_RATE_54M_INDEX_TABLE; i++)
2353 table[i].next_rate_index =
2354 iwl3945_rates[index].table_rs_index;
2355
2356 index = IWL_RATE_11M_INDEX_TABLE;
2357 /* CCK shouldn't fall back to OFDM... */
2358 table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
2359 }
2360 break;
2361
2362 default:
2363 WARN_ON(1);
2364 break;
2365 }
2366
2367 /* Update the rate scaling for control frame Tx */
2368 rate_cmd.table_id = 0;
2369 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2370 &rate_cmd);
2371 if (rc)
2372 return rc;
2373
2374 /* Update the rate scaling for data frame Tx */
2375 rate_cmd.table_id = 1;
2376 return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2377 &rate_cmd);
2378}
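For illustration only, a conceptual sketch of the fallback table built above: each entry names the next (lower) rate to try once try_cnt attempts at the current rate have failed, so the table behaves like a linked list walked by the firmware. The four rates and try counts below are hypothetical.

#include <stdio.h>

struct rs_entry {
	const char *name;
	int try_cnt;		/* attempts before falling back */
	int next_rate_index;	/* index of the next lower rate */
};

int main(void)
{
	/* index 0 is the lowest rate and falls back to itself */
	struct rs_entry table[4] = {
		{ "1M",   3, 0 },
		{ "2M",   3, 0 },
		{ "5.5M", 3, 1 },
		{ "11M",  3, 2 },
	};
	int i;

	/* walk the fallback chain starting from the highest rate */
	for (i = 3; ; i = table[i].next_rate_index) {
		printf("%s -> ", table[i].name);
		if (i == table[i].next_rate_index)
			break;
	}
	printf("stay\n");	/* prints: 11M -> 5.5M -> 2M -> 1M -> stay */
	return 0;
}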
2379
2380/* Called when initializing driver */
2381int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2382{
2383 memset((void *)&priv->hw_params, 0,
2384 sizeof(struct iwl_hw_params));
2385
2386 priv->_3945.shared_virt =
2387 dma_alloc_coherent(&priv->pci_dev->dev,
2388 sizeof(struct iwl3945_shared),
2389 &priv->_3945.shared_phys, GFP_KERNEL);
2390 if (!priv->_3945.shared_virt) {
2391 IWL_ERR(priv, "failed to allocate pci memory\n");
2392 return -ENOMEM;
2393 }
2394
2395 /* Assign number of Usable TX queues */
2396 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
2397
2398 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2399 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
2400 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2401 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2402 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
2403 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
2404
2405 priv->sta_key_max_num = STA_KEY_MAX_NUM;
2406
2407 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2408 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
2409 priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
2410
2411 return 0;
2412}
2413
2414unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
2415 struct iwl3945_frame *frame, u8 rate)
2416{
2417 struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
2418 unsigned int frame_size;
2419
2420 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
2421 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2422
2423 tx_beacon_cmd->tx.sta_id =
2424 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2425 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2426
2427 frame_size = iwl3945_fill_beacon_frame(priv,
2428 tx_beacon_cmd->frame,
2429 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2430
2431 BUG_ON(frame_size > MAX_MPDU_SIZE);
2432 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2433
2434 tx_beacon_cmd->tx.rate = rate;
2435 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2436 TX_CMD_FLG_TSF_MSK);
2437
2438 /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
2439 tx_beacon_cmd->tx.supp_rates[0] =
2440 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2441
2442 tx_beacon_cmd->tx.supp_rates[1] =
2443 (IWL_CCK_BASIC_RATES_MASK & 0xF);
2444
2445 return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
2446}
2447
2448void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
2449{
2450 priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
2451 priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
2452}
2453
2454void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
2455{
2456 INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
2457 iwl3945_bg_reg_txpower_periodic);
2458}
2459
2460void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
2461{
2462 cancel_delayed_work(&priv->_3945.thermal_periodic);
2463}
2464
2465/* check contents of special bootstrap uCode SRAM */
2466static int iwl3945_verify_bsm(struct iwl_priv *priv)
2467{
2468 __le32 *image = priv->ucode_boot.v_addr;
2469 u32 len = priv->ucode_boot.len;
2470 u32 reg;
2471 u32 val;
2472
2473 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
2474
2475 /* verify BSM SRAM contents */
2476 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
2477 for (reg = BSM_SRAM_LOWER_BOUND;
2478 reg < BSM_SRAM_LOWER_BOUND + len;
2479 reg += sizeof(u32), image++) {
2480 val = iwl_legacy_read_prph(priv, reg);
2481 if (val != le32_to_cpu(*image)) {
2482 IWL_ERR(priv, "BSM uCode verification failed at "
2483 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
2484 BSM_SRAM_LOWER_BOUND,
2485 reg - BSM_SRAM_LOWER_BOUND, len,
2486 val, le32_to_cpu(*image));
2487 return -EIO;
2488 }
2489 }
2490
2491 IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
2492
2493 return 0;
2494}
2495
2496
2497/******************************************************************************
2498 *
2499 * EEPROM related functions
2500 *
2501 ******************************************************************************/
2502
2503/*
2504 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
2505 * embedded controller) as EEPROM reader; each read is a series of pulses
2506 * to/from the EEPROM chip, not a single event, so even reads could conflict
2507 * if they weren't arbitrated by some ownership mechanism. Here, the driver
2508 * simply claims ownership, which should be safe when this function is called
2509 * (i.e. before loading uCode!).
2510 */
2511static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
2512{
2513 _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
2514 return 0;
2515}
2516
2517
2518static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
2519{
2520 return;
2521}
2522
2523 /**
2524 * iwl3945_load_bsm - Load bootstrap instructions
2525 *
2526 * BSM operation:
2527 *
2528 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
2529 * in special SRAM that does not power down during RFKILL. When powering back
2530 * up after power-saving sleeps (or during initial uCode load), the BSM loads
2531 * the bootstrap program into the on-board processor, and starts it.
2532 *
2533 * The bootstrap program loads (via DMA) instructions and data for a new
2534 * program from host DRAM locations indicated by the host driver in the
2535 * BSM_DRAM_* registers. Once the new program is loaded, it starts
2536 * automatically.
2537 *
2538 * When initializing the NIC, the host driver points the BSM to the
2539 * "initialize" uCode image. This uCode sets up some internal data, then
2540 * notifies host via "initialize alive" that it is complete.
2541 *
2542 * The host then replaces the BSM_DRAM_* pointer values to point to the
2543 * normal runtime uCode instructions and a backup uCode data cache buffer
2544 * (filled initially with starting data values for the on-board processor),
2545 * then triggers the "initialize" uCode to load and launch the runtime uCode,
2546 * which begins normal operation.
2547 *
2548 * When doing a power-save shutdown, runtime uCode saves data SRAM into
2549 * the backup data cache in DRAM before SRAM is powered down.
2550 *
2551 * When powering back up, the BSM loads the bootstrap program. This reloads
2552 * the runtime uCode instructions and the backup data cache into SRAM,
2553 * and re-launches the runtime uCode from where it left off.
2554 */
2555static int iwl3945_load_bsm(struct iwl_priv *priv)
2556{
2557 __le32 *image = priv->ucode_boot.v_addr;
2558 u32 len = priv->ucode_boot.len;
2559 dma_addr_t pinst;
2560 dma_addr_t pdata;
2561 u32 inst_len;
2562 u32 data_len;
2563 int rc;
2564 int i;
2565 u32 done;
2566 u32 reg_offset;
2567
2568 IWL_DEBUG_INFO(priv, "Begin load bsm\n");
2569
2570 /* make sure bootstrap program is no larger than BSM's SRAM size */
2571 if (len > IWL39_MAX_BSM_SIZE)
2572 return -EINVAL;
2573
2574 /* Tell bootstrap uCode where to find the "Initialize" uCode
2575 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
2576 * NOTE: iwl3945_initialize_alive_start() will replace these values,
2577 * after the "initialize" uCode has run, to point to
2578 * runtime/protocol instructions and backup data cache. */
2579 pinst = priv->ucode_init.p_addr;
2580 pdata = priv->ucode_init_data.p_addr;
2581 inst_len = priv->ucode_init.len;
2582 data_len = priv->ucode_init_data.len;
2583
2584 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2585 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2586 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2587 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2588
2589 /* Fill BSM memory with bootstrap instructions */
2590 for (reg_offset = BSM_SRAM_LOWER_BOUND;
2591 reg_offset < BSM_SRAM_LOWER_BOUND + len;
2592 reg_offset += sizeof(u32), image++)
2593 _iwl_legacy_write_prph(priv, reg_offset,
2594 le32_to_cpu(*image));
2595
2596 rc = iwl3945_verify_bsm(priv);
2597 if (rc)
2598 return rc;
2599
2600 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2601 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
2602 iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
2603 IWL39_RTC_INST_LOWER_BOUND);
2604 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2605
2606 /* Load bootstrap code into instruction SRAM now,
2607 * to prepare to load "initialize" uCode */
2608 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2609 BSM_WR_CTRL_REG_BIT_START);
2610
2611 /* Wait for load of bootstrap uCode to finish */
2612 for (i = 0; i < 100; i++) {
2613 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
2614 if (!(done & BSM_WR_CTRL_REG_BIT_START))
2615 break;
2616 udelay(10);
2617 }
2618 if (i < 100)
2619 IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
2620 else {
2621 IWL_ERR(priv, "BSM write did not complete!\n");
2622 return -EIO;
2623 }
2624
2625 /* Enable future boot loads whenever power management unit triggers it
2626 * (e.g. when powering back up after power-save shutdown) */
2627 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2628 BSM_WR_CTRL_REG_BIT_START_EN);
2629
2630 return 0;
2631}
2632
2633static struct iwl_hcmd_ops iwl3945_hcmd = {
2634 .rxon_assoc = iwl3945_send_rxon_assoc,
2635 .commit_rxon = iwl3945_commit_rxon,
2636};
2637
2638static struct iwl_lib_ops iwl3945_lib = {
2639 .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
2640 .txq_free_tfd = iwl3945_hw_txq_free_tfd,
2641 .txq_init = iwl3945_hw_tx_queue_init,
2642 .load_ucode = iwl3945_load_bsm,
2643 .dump_nic_event_log = iwl3945_dump_nic_event_log,
2644 .dump_nic_error_log = iwl3945_dump_nic_error_log,
2645 .apm_ops = {
2646 .init = iwl3945_apm_init,
2647 .config = iwl3945_nic_config,
2648 },
2649 .eeprom_ops = {
2650 .regulatory_bands = {
2651 EEPROM_REGULATORY_BAND_1_CHANNELS,
2652 EEPROM_REGULATORY_BAND_2_CHANNELS,
2653 EEPROM_REGULATORY_BAND_3_CHANNELS,
2654 EEPROM_REGULATORY_BAND_4_CHANNELS,
2655 EEPROM_REGULATORY_BAND_5_CHANNELS,
2656 EEPROM_REGULATORY_BAND_NO_HT40,
2657 EEPROM_REGULATORY_BAND_NO_HT40,
2658 },
2659 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
2660 .release_semaphore = iwl3945_eeprom_release_semaphore,
2661 },
2662 .send_tx_power = iwl3945_send_tx_power,
2663 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
2664
2665 .debugfs_ops = {
2666 .rx_stats_read = iwl3945_ucode_rx_stats_read,
2667 .tx_stats_read = iwl3945_ucode_tx_stats_read,
2668 .general_stats_read = iwl3945_ucode_general_stats_read,
2669 },
2670};
2671
2672static const struct iwl_legacy_ops iwl3945_legacy_ops = {
2673 .post_associate = iwl3945_post_associate,
2674 .config_ap = iwl3945_config_ap,
2675 .manage_ibss_station = iwl3945_manage_ibss_station,
2676};
2677
2678static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2679 .get_hcmd_size = iwl3945_get_hcmd_size,
2680 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2681 .request_scan = iwl3945_request_scan,
2682 .post_scan = iwl3945_post_scan,
2683};
2684
2685static const struct iwl_ops iwl3945_ops = {
2686 .lib = &iwl3945_lib,
2687 .hcmd = &iwl3945_hcmd,
2688 .utils = &iwl3945_hcmd_utils,
2689 .led = &iwl3945_led_ops,
2690 .legacy = &iwl3945_legacy_ops,
2691 .ieee80211_ops = &iwl3945_hw_ops,
2692};
2693
2694static struct iwl_base_params iwl3945_base_params = {
2695 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2696 .num_of_queues = IWL39_NUM_QUEUES,
2697 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2698 .set_l0s = false,
2699 .use_bsm = true,
2700 .led_compensation = 64,
2701 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2702 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2703 .max_event_log_size = 512,
2704};
2705
2706static struct iwl_cfg iwl3945_bg_cfg = {
2707 .name = "3945BG",
2708 .fw_name_pre = IWL3945_FW_PRE,
2709 .ucode_api_max = IWL3945_UCODE_API_MAX,
2710 .ucode_api_min = IWL3945_UCODE_API_MIN,
2711 .sku = IWL_SKU_G,
2712 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2713 .ops = &iwl3945_ops,
2714 .mod_params = &iwl3945_mod_params,
2715 .base_params = &iwl3945_base_params,
2716 .led_mode = IWL_LED_BLINK,
2717};
2718
2719static struct iwl_cfg iwl3945_abg_cfg = {
2720 .name = "3945ABG",
2721 .fw_name_pre = IWL3945_FW_PRE,
2722 .ucode_api_max = IWL3945_UCODE_API_MAX,
2723 .ucode_api_min = IWL3945_UCODE_API_MIN,
2724 .sku = IWL_SKU_A|IWL_SKU_G,
2725 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2726 .ops = &iwl3945_ops,
2727 .mod_params = &iwl3945_mod_params,
2728 .base_params = &iwl3945_base_params,
2729 .led_mode = IWL_LED_BLINK,
2730};
2731
2732DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
2733 {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
2734 {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
2735 {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
2736 {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
2737 {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
2738 {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
2739 {0}
2740};
2741
2742MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
new file mode 100644
index 000000000000..b118b59b71de
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.h
@@ -0,0 +1,308 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-3945.h) for driver implementation definitions.
28 * Please use iwl-3945-commands.h for uCode API definitions.
29 * Please use iwl-3945-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_3945_h__
33#define __iwl_3945_h__
34
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h>
38
39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern const struct pci_device_id iwl3945_hw_card_ids[];
41
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-3945-hw.h"
46#include "iwl-debug.h"
47#include "iwl-power.h"
48#include "iwl-dev.h"
49#include "iwl-led.h"
50
51/* Highest firmware API version supported */
52#define IWL3945_UCODE_API_MAX 2
53
54/* Lowest firmware API version supported */
55#define IWL3945_UCODE_API_MIN 1
56
57#define IWL3945_FW_PRE "iwlwifi-3945-"
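/*
 * Two-level macro so that "api" is macro-expanded before it is stringized;
 * e.g. IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX) produces the string
 * "iwlwifi-3945-2.ucode" rather than
 * "iwlwifi-3945-IWL3945_UCODE_API_MAX.ucode".
 */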
58#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
59#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
60
61/* Default noise level to report when noise measurement is not available.
62 * This may be because we're:
63 * 1) Not associated (4965, no beacon statistics being sent to driver)
64 * 2) Scanning (noise measurement does not apply to associated channel)
65 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
66 * Use default noise value of -127 ... this is below the range of measurable
67 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
68 * Also, -127 works better than 0 when averaging frames with/without
69 * noise info (e.g. averaging might be done in app); measured dBm values are
70 * always negative ... using a negative value as the default keeps all
71 * averages within an s8's (used in some apps) range of negative values. */
72#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
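/* For example, averaging a frame that reported -60 dBm of noise with one that
 * did not gives (-60 + -127) / 2 = -93, which still reads as a plausible (if
 * pessimistic) noise figure, whereas a default of 0 would give -30 and make
 * the noise floor look far higher than it really is. */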
73
74/* Module parameters accessible from iwl-*.c */
75extern struct iwl_mod_params iwl3945_mod_params;
76
77struct iwl3945_rate_scale_data {
78 u64 data;
79 s32 success_counter;
80 s32 success_ratio;
81 s32 counter;
82 s32 average_tpt;
83 unsigned long stamp;
84};
85
86struct iwl3945_rs_sta {
87 spinlock_t lock;
88 struct iwl_priv *priv;
89 s32 *expected_tpt;
90 unsigned long last_partial_flush;
91 unsigned long last_flush;
92 u32 flush_time;
93 u32 last_tx_packets;
94 u32 tx_packets;
95 u8 tgg;
96 u8 flush_pending;
97 u8 start_rate;
98 struct timer_list rate_scale_flush;
99 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
100#ifdef CONFIG_MAC80211_DEBUGFS
101 struct dentry *rs_sta_dbgfs_stats_table_file;
102#endif
103
104 /* used to be in sta_info */
105 int last_txrate_idx;
106};
107
108
109/*
110 * The common struct MUST be first because it is shared between
111 * 3945 and 4965!
112 */
113struct iwl3945_sta_priv {
114 struct iwl_station_priv_common common;
115 struct iwl3945_rs_sta rs_sta;
116};
117
118enum iwl3945_antenna {
119 IWL_ANTENNA_DIVERSITY,
120 IWL_ANTENNA_MAIN,
121 IWL_ANTENNA_AUX
122};
123
124/*
125 * RTS threshold here is total size [2347] minus 4 FCS bytes
126 * Per spec:
127 * a value of 0 means RTS on all data/management packets
128 * a value > max MSDU size means no RTS
129 * else RTS for data/management frames where MPDU is larger
130 * than RTS value.
131 */
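/* For example, a threshold of 500 would add RTS/CTS protection to any
 * data/management frame whose MPDU exceeds 500 bytes, while the default of
 * 2347 lies above the maximum MSDU size and therefore leaves RTS/CTS off. */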
132#define DEFAULT_RTS_THRESHOLD 2347U
133#define MIN_RTS_THRESHOLD 0U
134#define MAX_RTS_THRESHOLD 2347U
135#define MAX_MSDU_SIZE 2304U
136#define MAX_MPDU_SIZE 2346U
137#define DEFAULT_BEACON_INTERVAL 100U
138#define DEFAULT_SHORT_RETRY_LIMIT 7U
139#define DEFAULT_LONG_RETRY_LIMIT 4U
140
141#define IWL_TX_FIFO_AC0 0
142#define IWL_TX_FIFO_AC1 1
143#define IWL_TX_FIFO_AC2 2
144#define IWL_TX_FIFO_AC3 3
145#define IWL_TX_FIFO_HCCA_1 5
146#define IWL_TX_FIFO_HCCA_2 6
147#define IWL_TX_FIFO_NONE 7
148
149#define IEEE80211_DATA_LEN 2304
150#define IEEE80211_4ADDR_LEN 30
151#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
152#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
153
154struct iwl3945_frame {
155 union {
156 struct ieee80211_hdr frame;
157 struct iwl3945_tx_beacon_cmd beacon;
158 u8 raw[IEEE80211_FRAME_LEN];
159 u8 cmd[360];
160 } u;
161 struct list_head list;
162};
163
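/* The 802.11 sequence-control field carries a 4-bit fragment number in its
 * low nibble and a 12-bit sequence number above it, so for a raw value of
 * 0x1234, SEQ_TO_SN() yields 0x123 (fragment number 4). */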
164#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
165#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
166#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
167
168#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
169#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
170#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
171
172#define IWL_SUPPORTED_RATES_IE_LEN 8
173
174#define SCAN_INTERVAL 100
175
176#define MAX_TID_COUNT 9
177
178#define IWL_INVALID_RATE 0xFF
179#define IWL_INVALID_VALUE -1
180
181#define STA_PS_STATUS_WAKE 0
182#define STA_PS_STATUS_SLEEP 1
183
184struct iwl3945_ibss_seq {
185 u8 mac[ETH_ALEN];
186 u16 seq_num;
187 u16 frag_num;
188 unsigned long packet_time;
189 struct list_head list;
190};
191
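/* The helpers below walk the layout of a received 3945 frame: the Rx
 * statistics block (followed by phy_count bytes of PHY data), then the frame
 * header, then "len" bytes of 802.11 payload, then the frame-end block. */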
192#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
193 x->u.rx_frame.stats.payload + \
194 x->u.rx_frame.stats.phy_count))
195#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
196 IWL_RX_HDR(x)->payload + \
197 le16_to_cpu(IWL_RX_HDR(x)->len)))
198#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
199#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
200
201
202/******************************************************************************
203 *
204 * Functions implemented in iwl3945-base.c which are forward declared here
205 * for use by iwl-*.c
206 *
207 *****************************************************************************/
208extern int iwl3945_calc_db_from_ratio(int sig_ratio);
209extern void iwl3945_rx_replenish(void *data);
210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
212 struct ieee80211_hdr *hdr, int left);
213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
214 char **buf, bool display);
215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
216
217/******************************************************************************
218 *
219 * Functions implemented in iwl-[34]*.c which are forward declared here
220 * for use by iwl3945-base.c
221 *
222 * NOTE: The implementation of these functions are hardware specific
223 * which is why they are in the hardware specific files (vs. iwl-base.c)
224 *
225 * Naming convention --
226 * iwl3945_ <-- It's part of iwlwifi (should be changed to iwl3945_)
227 * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
228 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
229 * iwl3945_bg_ <-- Called from work queue context
230 * iwl3945_mac_ <-- mac80211 callback
231 *
232 ****************************************************************************/
233extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
234extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
235extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
236extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
237extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
238extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
239extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
240extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
241extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
242extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
243extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
244 struct iwl_tx_queue *txq,
245 dma_addr_t addr, u16 len,
246 u8 reset, u8 pad);
247extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
248 struct iwl_tx_queue *txq);
249extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
250extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
251 struct iwl_tx_queue *txq);
252extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
253 struct iwl3945_frame *frame, u8 rate);
254void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
255 struct iwl_device_cmd *cmd,
256 struct ieee80211_tx_info *info,
257 struct ieee80211_hdr *hdr,
258 int sta_id, int tx_id);
259extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
260extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
261extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
262 struct iwl_rx_mem_buffer *rxb);
263void iwl3945_reply_statistics(struct iwl_priv *priv,
264 struct iwl_rx_mem_buffer *rxb);
265extern void iwl3945_disable_events(struct iwl_priv *priv);
266extern int iwl4965_get_temperature(const struct iwl_priv *priv);
267extern void iwl3945_post_associate(struct iwl_priv *priv);
268extern void iwl3945_config_ap(struct iwl_priv *priv);
269
270extern int iwl3945_commit_rxon(struct iwl_priv *priv,
271 struct iwl_rxon_context *ctx);
272
273/**
274 * iwl3945_hw_find_station - Find station id for a given BSSID
275 * @bssid: MAC address of station ID to find
276 *
277 * NOTE: This should not be hardware specific but the code has
278 * not yet been merged into a single common layer for managing the
279 * station tables.
280 */
281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
282
283extern struct ieee80211_ops iwl3945_hw_ops;
284
285/*
286 * Forward declare iwl-3945.c functions for iwl3945-base.c
287 */
288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
290extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
291extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
292
293extern const struct iwl_channel_info *iwl3945_get_channel_info(
294 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
295
296extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
297
298/* scanning */
299int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
300void iwl3945_post_scan(struct iwl_priv *priv);
301
302/* rates */
303extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
304
305/* Requires full declaration of iwl_priv before including */
306#include "iwl-io.h"
307
308#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
new file mode 100644
index 000000000000..81d6a25eb04f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
@@ -0,0 +1,967 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/slab.h>
64#include <net/mac80211.h>
65
66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-4965-calib.h"
69
70/*****************************************************************************
71 * INIT calibrations framework
72 *****************************************************************************/
73
74struct statistics_general_data {
75 u32 beacon_silence_rssi_a;
76 u32 beacon_silence_rssi_b;
77 u32 beacon_silence_rssi_c;
78 u32 beacon_energy_a;
79 u32 beacon_energy_b;
80 u32 beacon_energy_c;
81};
82
83void iwl4965_calib_free_results(struct iwl_priv *priv)
84{
85 int i;
86
87 for (i = 0; i < IWL_CALIB_MAX; i++) {
88 kfree(priv->calib_results[i].buf);
89 priv->calib_results[i].buf = NULL;
90 priv->calib_results[i].buf_len = 0;
91 }
92}
93
94/*****************************************************************************
95 * RUNTIME calibrations framework
96 *****************************************************************************/
97
98/* "false alarms" are signals that our DSP tries to lock onto,
99 * but then determines that they are either noise, or transmissions
100 * from a distant wireless network (also "noise", really) that get
101 * "stepped on" by stronger transmissions within our own network.
102 * This algorithm attempts to set a sensitivity level that is high
103 * enough to receive all of our own network traffic, but not so
104 * high that our DSP gets too busy trying to lock onto non-network
105 * activity/noise. */
106static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
107 u32 norm_fa,
108 u32 rx_enable_time,
109 struct statistics_general_data *rx_info)
110{
111 u32 max_nrg_cck = 0;
112 int i = 0;
113 u8 max_silence_rssi = 0;
114 u32 silence_ref = 0;
115 u8 silence_rssi_a = 0;
116 u8 silence_rssi_b = 0;
117 u8 silence_rssi_c = 0;
118 u32 val;
119
120 /* "false_alarms" values below are cross-multiplications to assess the
121 * numbers of false alarms within the measured period of actual Rx
122 * (Rx is off when we're txing), vs the min/max expected false alarms
123 * (some should be expected if rx is sensitive enough) in a
124 * hypothetical listening period of 200 time units (TU), 204.8 msec:
125 *
126 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
127 *
128 * */
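	/* For example, norm_fa = 10 false alarms seen during
	 * rx_enable_time = 50000 usec of actual receive time gives
	 * false_alarms = 10 * 200 * 1024 = 2048000, which is compared
	 * against MIN_FA_CCK * 50000 and MAX_FA_CCK * 50000; i.e. the
	 * observed false-alarm rate is scaled to the reference 200 TU
	 * (204800 usec) listening period before being bounded. */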
129 u32 false_alarms = norm_fa * 200 * 1024;
130 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
131 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
132 struct iwl_sensitivity_data *data = NULL;
133 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
134
135 data = &(priv->sensitivity_data);
136
137 data->nrg_auto_corr_silence_diff = 0;
138
139 /* Find max silence rssi among all 3 receivers.
140 * This is background noise, which may include transmissions from other
141 * networks, measured during silence before our network's beacon */
142 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
143 ALL_BAND_FILTER) >> 8);
144 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
145 ALL_BAND_FILTER) >> 8);
146 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
147 ALL_BAND_FILTER) >> 8);
148
149 val = max(silence_rssi_b, silence_rssi_c);
150 max_silence_rssi = max(silence_rssi_a, (u8) val);
151
152 /* Store silence rssi in 20-beacon history table */
153 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
154 data->nrg_silence_idx++;
155 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
156 data->nrg_silence_idx = 0;
157
158 /* Find max silence rssi across 20 beacon history */
159 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
160 val = data->nrg_silence_rssi[i];
161 silence_ref = max(silence_ref, val);
162 }
163 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
164 silence_rssi_a, silence_rssi_b, silence_rssi_c,
165 silence_ref);
166
167 /* Find max rx energy (min value!) among all 3 receivers,
168 * measured during beacon frame.
169 * Save it in 10-beacon history table. */
170 i = data->nrg_energy_idx;
171 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
172 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
173
174 data->nrg_energy_idx++;
175 if (data->nrg_energy_idx >= 10)
176 data->nrg_energy_idx = 0;
177
178 /* Find min rx energy (max value) across 10 beacon history.
179 * This is the minimum signal level that we want to receive well.
180 * Add backoff (margin so we don't miss slightly lower energy frames).
181 * This establishes an upper bound (min value) for energy threshold. */
182 max_nrg_cck = data->nrg_value[0];
183 for (i = 1; i < 10; i++)
184 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
185 max_nrg_cck += 6;
186
187 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
188 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
189 rx_info->beacon_energy_c, max_nrg_cck - 6);
190
191 /* Count number of consecutive beacons with fewer-than-desired
192 * false alarms. */
193 if (false_alarms < min_false_alarms)
194 data->num_in_cck_no_fa++;
195 else
196 data->num_in_cck_no_fa = 0;
197 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
198 data->num_in_cck_no_fa);
199
200 /* If we got too many false alarms this time, reduce sensitivity */
201 if ((false_alarms > max_false_alarms) &&
202 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
203 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
204 false_alarms, max_false_alarms);
205 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
206 data->nrg_curr_state = IWL_FA_TOO_MANY;
207 /* Store for "fewer than desired" on later beacon */
208 data->nrg_silence_ref = silence_ref;
209
210 /* increase energy threshold (reduce nrg value)
211 * to decrease sensitivity */
212 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
213 /* Else if we got fewer than desired, increase sensitivity */
214 } else if (false_alarms < min_false_alarms) {
215 data->nrg_curr_state = IWL_FA_TOO_FEW;
216
217 /* Compare silence level with silence level for most recent
218 * healthy number or too many false alarms */
219 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
220 (s32)silence_ref;
221
222 IWL_DEBUG_CALIB(priv,
223 "norm FA %u < min FA %u, silence diff %d\n",
224 false_alarms, min_false_alarms,
225 data->nrg_auto_corr_silence_diff);
226
227 /* Increase value to increase sensitivity, but only if:
228 * 1a) previous beacon did *not* have *too many* false alarms
229 * 1b) AND there's a significant difference in Rx levels
230 * from a previous beacon with too many, or healthy # FAs
231 * OR 2) We've seen a lot of beacons (100) with too few
232 * false alarms */
233 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
234 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
235 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
236
237 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
238 /* Increase nrg value to increase sensitivity */
239 val = data->nrg_th_cck + NRG_STEP_CCK;
240 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
241 } else {
242 IWL_DEBUG_CALIB(priv,
243 "... but not changing sensitivity\n");
244 }
245
246 /* Else we got a healthy number of false alarms, keep status quo */
247 } else {
248 IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
249 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
250
251 /* Store for use in "fewer than desired" with later beacon */
252 data->nrg_silence_ref = silence_ref;
253
254 /* If previous beacon had too many false alarms,
255 * give it some extra margin by reducing sensitivity again
256 * (but don't go below measured energy of desired Rx) */
257 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
258 IWL_DEBUG_CALIB(priv, "... increasing margin\n");
259 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
260 data->nrg_th_cck -= NRG_MARGIN;
261 else
262 data->nrg_th_cck = max_nrg_cck;
263 }
264 }
265
266 /* Make sure the energy threshold does not go above the measured
267 * energy of the desired Rx signals (reduced by backoff margin),
268 * or else we might start missing Rx frames.
269 * Lower value is higher energy, so we use max()!
270 */
271 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
272 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
273
274 data->nrg_prev_state = data->nrg_curr_state;
275
276 /* Auto-correlation CCK algorithm */
277 if (false_alarms > min_false_alarms) {
278
279 /* increase auto_corr values to decrease sensitivity
280 * so the DSP won't be disturbed by the noise
281 */
282 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
283 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
284 else {
285 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
286 data->auto_corr_cck =
287 min((u32)ranges->auto_corr_max_cck, val);
288 }
289 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
290 data->auto_corr_cck_mrc =
291 min((u32)ranges->auto_corr_max_cck_mrc, val);
292 } else if ((false_alarms < min_false_alarms) &&
293 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
294 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
295
296 /* Decrease auto_corr values to increase sensitivity */
297 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
298 data->auto_corr_cck =
299 max((u32)ranges->auto_corr_min_cck, val);
300 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
301 data->auto_corr_cck_mrc =
302 max((u32)ranges->auto_corr_min_cck_mrc, val);
303 }
304
305 return 0;
306}
307
308
309static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
310 u32 norm_fa,
311 u32 rx_enable_time)
312{
313 u32 val;
314 u32 false_alarms = norm_fa * 200 * 1024;
315 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
316 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
317 struct iwl_sensitivity_data *data = NULL;
318 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
319
320 data = &(priv->sensitivity_data);
321
322 /* If we got too many false alarms this time, reduce sensitivity */
323 if (false_alarms > max_false_alarms) {
324
325		IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
326 false_alarms, max_false_alarms);
327
328 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
329 data->auto_corr_ofdm =
330 min((u32)ranges->auto_corr_max_ofdm, val);
331
332 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
333 data->auto_corr_ofdm_mrc =
334 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
335
336 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
337 data->auto_corr_ofdm_x1 =
338 min((u32)ranges->auto_corr_max_ofdm_x1, val);
339
340 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
341 data->auto_corr_ofdm_mrc_x1 =
342 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
343 }
344
345 /* Else if we got fewer than desired, increase sensitivity */
346 else if (false_alarms < min_false_alarms) {
347
348 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
349 false_alarms, min_false_alarms);
350
351 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
352 data->auto_corr_ofdm =
353 max((u32)ranges->auto_corr_min_ofdm, val);
354
355 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
356 data->auto_corr_ofdm_mrc =
357 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
358
359 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
360 data->auto_corr_ofdm_x1 =
361 max((u32)ranges->auto_corr_min_ofdm_x1, val);
362
363 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
364 data->auto_corr_ofdm_mrc_x1 =
365 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
366 } else {
367 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
368 min_false_alarms, false_alarms, max_false_alarms);
369 }
370 return 0;
371}
372
373static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
374 struct iwl_sensitivity_data *data,
375 __le16 *tbl)
376{
377 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
378 cpu_to_le16((u16)data->auto_corr_ofdm);
379 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
380 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
381 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
382 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
383 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
384 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
385
386 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
387 cpu_to_le16((u16)data->auto_corr_cck);
388 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
389 cpu_to_le16((u16)data->auto_corr_cck_mrc);
390
391 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
392 cpu_to_le16((u16)data->nrg_th_cck);
393 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
394 cpu_to_le16((u16)data->nrg_th_ofdm);
395
396 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
397 cpu_to_le16(data->barker_corr_th_min);
398 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
399 cpu_to_le16(data->barker_corr_th_min_mrc);
400 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
401 cpu_to_le16(data->nrg_th_cca);
402
403 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
404 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
405 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
406 data->nrg_th_ofdm);
407
408 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
409 data->auto_corr_cck, data->auto_corr_cck_mrc,
410 data->nrg_th_cck);
411}
412
413/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
414static int iwl4965_sensitivity_write(struct iwl_priv *priv)
415{
416 struct iwl_sensitivity_cmd cmd;
417 struct iwl_sensitivity_data *data = NULL;
418 struct iwl_host_cmd cmd_out = {
419 .id = SENSITIVITY_CMD,
420 .len = sizeof(struct iwl_sensitivity_cmd),
421 .flags = CMD_ASYNC,
422 .data = &cmd,
423 };
424
425 data = &(priv->sensitivity_data);
426
427 memset(&cmd, 0, sizeof(cmd));
428
429 iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
430
431 /* Update uCode's "work" table, and copy it to DSP */
432 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
433
434 /* Don't send command to uCode if nothing has changed */
435 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
436 sizeof(u16)*HD_TABLE_SIZE)) {
437 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
438 return 0;
439 }
440
441 /* Copy table for comparison next time */
442 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
443 sizeof(u16)*HD_TABLE_SIZE);
444
445 return iwl_legacy_send_cmd(priv, &cmd_out);
446}
447
448void iwl4965_init_sensitivity(struct iwl_priv *priv)
449{
450 int ret = 0;
451 int i;
452 struct iwl_sensitivity_data *data = NULL;
453 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
454
455 if (priv->disable_sens_cal)
456 return;
457
458 IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
459
460 /* Clear driver's sensitivity algo data */
461 data = &(priv->sensitivity_data);
462
463 if (ranges == NULL)
464 return;
465
466 memset(data, 0, sizeof(struct iwl_sensitivity_data));
467
468 data->num_in_cck_no_fa = 0;
469 data->nrg_curr_state = IWL_FA_TOO_MANY;
470 data->nrg_prev_state = IWL_FA_TOO_MANY;
471 data->nrg_silence_ref = 0;
472 data->nrg_silence_idx = 0;
473 data->nrg_energy_idx = 0;
474
475 for (i = 0; i < 10; i++)
476 data->nrg_value[i] = 0;
477
478 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
479 data->nrg_silence_rssi[i] = 0;
480
481 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
482 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
483 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
484 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
485 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
486 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
487 data->nrg_th_cck = ranges->nrg_th_cck;
488 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
489 data->barker_corr_th_min = ranges->barker_corr_th_min;
490 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
491 data->nrg_th_cca = ranges->nrg_th_cca;
492
493 data->last_bad_plcp_cnt_ofdm = 0;
494 data->last_fa_cnt_ofdm = 0;
495 data->last_bad_plcp_cnt_cck = 0;
496 data->last_fa_cnt_cck = 0;
497
498 ret |= iwl4965_sensitivity_write(priv);
499 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
500}
501
502void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
503{
504 u32 rx_enable_time;
505 u32 fa_cck;
506 u32 fa_ofdm;
507 u32 bad_plcp_cck;
508 u32 bad_plcp_ofdm;
509 u32 norm_fa_ofdm;
510 u32 norm_fa_cck;
511 struct iwl_sensitivity_data *data = NULL;
512 struct statistics_rx_non_phy *rx_info;
513 struct statistics_rx_phy *ofdm, *cck;
514 unsigned long flags;
515 struct statistics_general_data statis;
516
517 if (priv->disable_sens_cal)
518 return;
519
520 data = &(priv->sensitivity_data);
521
522 if (!iwl_legacy_is_any_associated(priv)) {
523 IWL_DEBUG_CALIB(priv, "<< - not associated\n");
524 return;
525 }
526
527 spin_lock_irqsave(&priv->lock, flags);
528
529 rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
530 ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
531 cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
532
533 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
534 IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
535 spin_unlock_irqrestore(&priv->lock, flags);
536 return;
537 }
538
539 /* Extract Statistics: */
540 rx_enable_time = le32_to_cpu(rx_info->channel_load);
541 fa_cck = le32_to_cpu(cck->false_alarm_cnt);
542 fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
543 bad_plcp_cck = le32_to_cpu(cck->plcp_err);
544 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
545
546 statis.beacon_silence_rssi_a =
547 le32_to_cpu(rx_info->beacon_silence_rssi_a);
548 statis.beacon_silence_rssi_b =
549 le32_to_cpu(rx_info->beacon_silence_rssi_b);
550 statis.beacon_silence_rssi_c =
551 le32_to_cpu(rx_info->beacon_silence_rssi_c);
552 statis.beacon_energy_a =
553 le32_to_cpu(rx_info->beacon_energy_a);
554 statis.beacon_energy_b =
555 le32_to_cpu(rx_info->beacon_energy_b);
556 statis.beacon_energy_c =
557 le32_to_cpu(rx_info->beacon_energy_c);
558
559 spin_unlock_irqrestore(&priv->lock, flags);
560
561 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
562
563 if (!rx_enable_time) {
564 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
565 return;
566 }
567
568 /* These statistics increase monotonically, and do not reset
569 * at each beacon. Calculate difference from last value, or just
570 * use the new statistics value if it has reset or wrapped around. */
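	/* For example, if the previous snapshot of a counter was 1000 and the
	 * new one is 1040, the delta used below is 40; if the new snapshot is
	 * smaller than the previous one (uCode restart or wrap-around), the
	 * raw snapshot itself is used and becomes the new baseline. */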
571 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
572 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
573 else {
574 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
575 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
576 }
577
578 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
579 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
580 else {
581 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
582 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
583 }
584
585 if (data->last_fa_cnt_ofdm > fa_ofdm)
586 data->last_fa_cnt_ofdm = fa_ofdm;
587 else {
588 fa_ofdm -= data->last_fa_cnt_ofdm;
589 data->last_fa_cnt_ofdm += fa_ofdm;
590 }
591
592 if (data->last_fa_cnt_cck > fa_cck)
593 data->last_fa_cnt_cck = fa_cck;
594 else {
595 fa_cck -= data->last_fa_cnt_cck;
596 data->last_fa_cnt_cck += fa_cck;
597 }
598
599 /* Total aborted signal locks */
600 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
601 norm_fa_cck = fa_cck + bad_plcp_cck;
602
603 IWL_DEBUG_CALIB(priv,
604 "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
605 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
606
607 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
608 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
609
610 iwl4965_sensitivity_write(priv);
611}
612
613static inline u8 iwl4965_find_first_chain(u8 mask)
614{
615 if (mask & ANT_A)
616 return CHAIN_A;
617 if (mask & ANT_B)
618 return CHAIN_B;
619 return CHAIN_C;
620}
621
622/**
623 * iwl4965_find_disconn_antenna - run the disconnected antenna algorithm to
624 * find out which antennas are disconnected.
625 */
626static void
627iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
628 struct iwl_chain_noise_data *data)
629{
630 u32 active_chains = 0;
631 u32 max_average_sig;
632 u16 max_average_sig_antenna_i;
633 u8 num_tx_chains;
634 u8 first_chain;
635 u16 i = 0;
636
637 average_sig[0] = data->chain_signal_a /
638 priv->cfg->base_params->chain_noise_num_beacons;
639 average_sig[1] = data->chain_signal_b /
640 priv->cfg->base_params->chain_noise_num_beacons;
641 average_sig[2] = data->chain_signal_c /
642 priv->cfg->base_params->chain_noise_num_beacons;
643
644 if (average_sig[0] >= average_sig[1]) {
645 max_average_sig = average_sig[0];
646 max_average_sig_antenna_i = 0;
647 active_chains = (1 << max_average_sig_antenna_i);
648 } else {
649 max_average_sig = average_sig[1];
650 max_average_sig_antenna_i = 1;
651 active_chains = (1 << max_average_sig_antenna_i);
652 }
653
654 if (average_sig[2] >= max_average_sig) {
655 max_average_sig = average_sig[2];
656 max_average_sig_antenna_i = 2;
657 active_chains = (1 << max_average_sig_antenna_i);
658 }
659
660 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
661 average_sig[0], average_sig[1], average_sig[2]);
662 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
663 max_average_sig, max_average_sig_antenna_i);
664
665 /* Compare signal strengths for all 3 receivers. */
666 for (i = 0; i < NUM_RX_CHAINS; i++) {
667 if (i != max_average_sig_antenna_i) {
668 s32 rssi_delta = (max_average_sig - average_sig[i]);
669
670 /* If signal is very weak, compared with
671 * strongest, mark it as disconnected. */
672 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
673 data->disconn_array[i] = 1;
674 else
675 active_chains |= (1 << i);
676 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
677 "disconn_array[i] = %d\n",
678 i, rssi_delta, data->disconn_array[i]);
679 }
680 }
681
682 /*
683 * The above algorithm sometimes fails when the ucode
684 * reports 0 for all chains. It's not clear why that
685 * happens to start with, but it is then causing trouble
686 * because this can make us enable more chains than the
687 * hardware really has.
688 *
689 * To be safe, simply mask out any chains that we know
690 * are not on the device.
691 */
692 active_chains &= priv->hw_params.valid_rx_ant;
693
694 num_tx_chains = 0;
695 for (i = 0; i < NUM_RX_CHAINS; i++) {
696		/* loops on all the bits of
697		 * priv->hw_params.valid_tx_ant */
698 u8 ant_msk = (1 << i);
699 if (!(priv->hw_params.valid_tx_ant & ant_msk))
700 continue;
701
702 num_tx_chains++;
703 if (data->disconn_array[i] == 0)
704 /* there is a Tx antenna connected */
705 break;
706 if (num_tx_chains == priv->hw_params.tx_chains_num &&
707 data->disconn_array[i]) {
708 /*
709 * If all chains are disconnected
710 * connect the first valid tx chain
711 */
712 first_chain =
713 iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
714 data->disconn_array[first_chain] = 0;
715 active_chains |= BIT(first_chain);
716			IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected "
717				"W/A - declare %d as connected\n",
718				first_chain);
719 break;
720 }
721 }
722
723 if (active_chains != priv->hw_params.valid_rx_ant &&
724 active_chains != priv->chain_noise_data.active_chains)
725 IWL_DEBUG_CALIB(priv,
726 "Detected that not all antennas are connected! "
727 "Connected: %#x, valid: %#x.\n",
728 active_chains, priv->hw_params.valid_rx_ant);
729
730 /* Save for use within RXON, TX, SCAN commands, etc. */
731 data->active_chains = active_chains;
732 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
733 active_chains);
734}
735
736static void iwl4965_gain_computation(struct iwl_priv *priv,
737 u32 *average_noise,
738 u16 min_average_noise_antenna_i,
739 u32 min_average_noise,
740 u8 default_chain)
741{
742 int i, ret;
743 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
744
745 data->delta_gain_code[min_average_noise_antenna_i] = 0;
746
747 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
748 s32 delta_g = 0;
749
750 if (!(data->disconn_array[i]) &&
751 (data->delta_gain_code[i] ==
752 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
753 delta_g = average_noise[i] - min_average_noise;
754 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
755 data->delta_gain_code[i] =
756 min(data->delta_gain_code[i],
757 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
758
759 data->delta_gain_code[i] =
760 (data->delta_gain_code[i] | (1 << 2));
761 } else {
762 data->delta_gain_code[i] = 0;
763 }
764 }
765 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
766 data->delta_gain_code[0],
767 data->delta_gain_code[1],
768 data->delta_gain_code[2]);
769
770 /* Differential gain gets sent to uCode only once */
771 if (!data->radio_write) {
772 struct iwl_calib_diff_gain_cmd cmd;
773 data->radio_write = 1;
774
775 memset(&cmd, 0, sizeof(cmd));
776 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
777 cmd.diff_gain_a = data->delta_gain_code[0];
778 cmd.diff_gain_b = data->delta_gain_code[1];
779 cmd.diff_gain_c = data->delta_gain_code[2];
780 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
781 sizeof(cmd), &cmd);
782 if (ret)
783 IWL_DEBUG_CALIB(priv, "fail sending cmd "
784 "REPLY_PHY_CALIBRATION_CMD\n");
785
786 /* TODO we might want recalculate
787 * rx_chain in rxon cmd */
788
789 /* Mark so we run this algo only once! */
790 data->state = IWL_CHAIN_NOISE_CALIBRATED;
791 }
792}
793
794
795
796/*
797 * Accumulate 16 beacons of signal and noise statistics for each of
798 * 3 receivers/antennas/rx-chains, then figure out:
799 * 1) Which antennas are connected.
800 * 2) Differential rx gain settings to balance the 3 receivers.
801 */
802void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
803{
804 struct iwl_chain_noise_data *data = NULL;
805
806 u32 chain_noise_a;
807 u32 chain_noise_b;
808 u32 chain_noise_c;
809 u32 chain_sig_a;
810 u32 chain_sig_b;
811 u32 chain_sig_c;
812 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
813 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
814 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
815 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
816 u16 i = 0;
817 u16 rxon_chnum = INITIALIZATION_VALUE;
818 u16 stat_chnum = INITIALIZATION_VALUE;
819 u8 rxon_band24;
820 u8 stat_band24;
821 unsigned long flags;
822 struct statistics_rx_non_phy *rx_info;
823
824 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
825
826 if (priv->disable_chain_noise_cal)
827 return;
828
829 data = &(priv->chain_noise_data);
830
831 /*
832 * Accumulate just the first "chain_noise_num_beacons" after
833 * the first association, then we're done forever.
834 */
835 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
836 if (data->state == IWL_CHAIN_NOISE_ALIVE)
837 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
838 return;
839 }
840
841 spin_lock_irqsave(&priv->lock, flags);
842
843 rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
844 rx.general);
845
846 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
847 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
848 spin_unlock_irqrestore(&priv->lock, flags);
849 return;
850 }
851
852 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
853 rxon_chnum = le16_to_cpu(ctx->staging.channel);
854
855 stat_band24 = !!(((struct iwl_notif_statistics *)
856 stat_resp)->flag &
857 STATISTICS_REPLY_FLG_BAND_24G_MSK);
858 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
859 stat_resp)->flag) >> 16;
860
861 /* Make sure we accumulate data for just the associated channel
862 * (even if scanning). */
863 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
864 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
865 rxon_chnum, rxon_band24);
866 spin_unlock_irqrestore(&priv->lock, flags);
867 return;
868 }
869
870 /*
871 * Accumulate beacon statistics values across
872 * "chain_noise_num_beacons"
873 */
874 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
875 IN_BAND_FILTER;
876 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
877 IN_BAND_FILTER;
878 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
879 IN_BAND_FILTER;
880
881 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
882 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
883 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
884
885 spin_unlock_irqrestore(&priv->lock, flags);
886
887 data->beacon_count++;
888
889 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
890 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
891 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
892
893 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
894 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
895 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
896
897 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
898 rxon_chnum, rxon_band24, data->beacon_count);
899 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
900 chain_sig_a, chain_sig_b, chain_sig_c);
901 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
902 chain_noise_a, chain_noise_b, chain_noise_c);
903
904 /* If this is the "chain_noise_num_beacons", determine:
905 * 1) Disconnected antennas (using signal strengths)
906 * 2) Differential gain (using silence noise) to balance receivers */
907 if (data->beacon_count !=
908 priv->cfg->base_params->chain_noise_num_beacons)
909 return;
910
911 /* Analyze signal for disconnected antenna */
912 iwl4965_find_disconn_antenna(priv, average_sig, data);
913
914 /* Analyze noise for rx balance */
915 average_noise[0] = data->chain_noise_a /
916 priv->cfg->base_params->chain_noise_num_beacons;
917 average_noise[1] = data->chain_noise_b /
918 priv->cfg->base_params->chain_noise_num_beacons;
919 average_noise[2] = data->chain_noise_c /
920 priv->cfg->base_params->chain_noise_num_beacons;
921
922 for (i = 0; i < NUM_RX_CHAINS; i++) {
923 if (!(data->disconn_array[i]) &&
924 (average_noise[i] <= min_average_noise)) {
925 /* This means that chain i is active and has
926 * lower noise values so far: */
927 min_average_noise = average_noise[i];
928 min_average_noise_antenna_i = i;
929 }
930 }
931
932 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
933 average_noise[0], average_noise[1],
934 average_noise[2]);
935
936 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
937 min_average_noise, min_average_noise_antenna_i);
938
939 iwl4965_gain_computation(priv, average_noise,
940 min_average_noise_antenna_i, min_average_noise,
941 iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
942
943 /* Some power changes may have been made during the calibration.
944 * Update and commit the RXON
945 */
946 if (priv->cfg->ops->lib->update_chain_flags)
947 priv->cfg->ops->lib->update_chain_flags(priv);
948
949 data->state = IWL_CHAIN_NOISE_DONE;
950 iwl_legacy_power_update_mode(priv, false);
951}
952
953void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
954{
955 int i;
956 memset(&(priv->sensitivity_data), 0,
957 sizeof(struct iwl_sensitivity_data));
958 memset(&(priv->chain_noise_data), 0,
959 sizeof(struct iwl_chain_noise_data));
960 for (i = 0; i < NUM_RX_CHAINS; i++)
961 priv->chain_noise_data.delta_gain_code[i] =
962 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
963
964 /* Ask for statistics now, the uCode will send notification
965 * periodically after association */
966 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
967}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
new file mode 100644
index 000000000000..f46c80e6e005
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
@@ -0,0 +1,75 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_4965_calib_h__
63#define __iwl_4965_calib_h__
64
65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-commands.h"
68
69void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
70void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
71void iwl4965_init_sensitivity(struct iwl_priv *priv);
72void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
73void iwl4965_calib_free_results(struct iwl_priv *priv);
74
75#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
new file mode 100644
index 000000000000..1c93665766e4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
@@ -0,0 +1,774 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "iwl-4965.h"
29#include "iwl-4965-debugfs.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
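/* Each fmt_table row prints, for one counter, its current value, the
 * accumulated total, the delta since the previous statistics notification,
 * and the largest delta seen, matching the columns named in fmt_header. */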
35
36static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
37{
38 int p = 0;
39 u32 flag;
40
41 flag = le32_to_cpu(priv->_4965.statistics.flag);
42
43 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
44 if (flag & UCODE_STATISTICS_CLEAR_MSK)
45 p += scnprintf(buf + p, bufsz - p,
46 "\tStatistics have been cleared\n");
47 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
48 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
49 ? "2.4 GHz" : "5.2 GHz");
50 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
51 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
52 ? "enabled" : "disabled");
53
54 return p;
55}
56
57ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
58 size_t count, loff_t *ppos)
59{
60 struct iwl_priv *priv = file->private_data;
61 int pos = 0;
62 char *buf;
63 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
64 sizeof(struct statistics_rx_non_phy) * 40 +
65 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
66 ssize_t ret;
67 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
68 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
69 struct statistics_rx_non_phy *general, *accum_general;
70 struct statistics_rx_non_phy *delta_general, *max_general;
71 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
72
73 if (!iwl_legacy_is_alive(priv))
74 return -EAGAIN;
75
76 buf = kzalloc(bufsz, GFP_KERNEL);
77 if (!buf) {
78 IWL_ERR(priv, "Can not allocate Buffer\n");
79 return -ENOMEM;
80 }
81
82 /*
83	 * the statistics information displayed here is based on
84	 * the last statistics notification from uCode and
85	 * might not reflect the current uCode activity
86 */
87 ofdm = &priv->_4965.statistics.rx.ofdm;
88 cck = &priv->_4965.statistics.rx.cck;
89 general = &priv->_4965.statistics.rx.general;
90 ht = &priv->_4965.statistics.rx.ofdm_ht;
91 accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
92 accum_cck = &priv->_4965.accum_statistics.rx.cck;
93 accum_general = &priv->_4965.accum_statistics.rx.general;
94 accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
95 delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
96 delta_cck = &priv->_4965.delta_statistics.rx.cck;
97 delta_general = &priv->_4965.delta_statistics.rx.general;
98 delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
99 max_ofdm = &priv->_4965.max_delta.rx.ofdm;
100 max_cck = &priv->_4965.max_delta.rx.cck;
101 max_general = &priv->_4965.max_delta.rx.general;
102 max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
103
104 pos += iwl4965_statistics_flag(priv, buf, bufsz);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 fmt_header, "Statistics_Rx - OFDM:");
107 pos += scnprintf(buf + pos, bufsz - pos,
108 fmt_table, "ina_cnt:",
109 le32_to_cpu(ofdm->ina_cnt),
110 accum_ofdm->ina_cnt,
111 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 fmt_table, "fina_cnt:",
114 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
115 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
116 pos += scnprintf(buf + pos, bufsz - pos,
117 fmt_table, "plcp_err:",
118 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
119 delta_ofdm->plcp_err, max_ofdm->plcp_err);
120 pos += scnprintf(buf + pos, bufsz - pos,
121 fmt_table, "crc32_err:",
122 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
123 delta_ofdm->crc32_err, max_ofdm->crc32_err);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 fmt_table, "overrun_err:",
126 le32_to_cpu(ofdm->overrun_err),
127 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
128 max_ofdm->overrun_err);
129 pos += scnprintf(buf + pos, bufsz - pos,
130 fmt_table, "early_overrun_err:",
131 le32_to_cpu(ofdm->early_overrun_err),
132 accum_ofdm->early_overrun_err,
133 delta_ofdm->early_overrun_err,
134 max_ofdm->early_overrun_err);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 fmt_table, "crc32_good:",
137 le32_to_cpu(ofdm->crc32_good),
138 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
139 max_ofdm->crc32_good);
140 pos += scnprintf(buf + pos, bufsz - pos,
141 fmt_table, "false_alarm_cnt:",
142 le32_to_cpu(ofdm->false_alarm_cnt),
143 accum_ofdm->false_alarm_cnt,
144 delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos += scnprintf(buf + pos, bufsz - pos,
153 fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout),
155 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
156 max_ofdm->sfd_timeout);
157 pos += scnprintf(buf + pos, bufsz - pos,
158 fmt_table, "fina_timeout:",
159 le32_to_cpu(ofdm->fina_timeout),
160 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 fmt_table, "unresponded_rts:",
164 le32_to_cpu(ofdm->unresponded_rts),
165 accum_ofdm->unresponded_rts,
166 delta_ofdm->unresponded_rts,
167 max_ofdm->unresponded_rts);
168 pos += scnprintf(buf + pos, bufsz - pos,
169 fmt_table, "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos += scnprintf(buf + pos, bufsz - pos,
175 fmt_table, "sent_ack_cnt:",
176 le32_to_cpu(ofdm->sent_ack_cnt),
177 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
178 max_ofdm->sent_ack_cnt);
179 pos += scnprintf(buf + pos, bufsz - pos,
180 fmt_table, "sent_cts_cnt:",
181 le32_to_cpu(ofdm->sent_cts_cnt),
182 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
183 max_ofdm->sent_cts_cnt);
184 pos += scnprintf(buf + pos, bufsz - pos,
185 fmt_table, "sent_ba_rsp_cnt:",
186 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
187 accum_ofdm->sent_ba_rsp_cnt,
188 delta_ofdm->sent_ba_rsp_cnt,
189 max_ofdm->sent_ba_rsp_cnt);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 fmt_table, "dsp_self_kill:",
192 le32_to_cpu(ofdm->dsp_self_kill),
193 accum_ofdm->dsp_self_kill,
194 delta_ofdm->dsp_self_kill,
195 max_ofdm->dsp_self_kill);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 fmt_table, "mh_format_err:",
198 le32_to_cpu(ofdm->mh_format_err),
199 accum_ofdm->mh_format_err,
200 delta_ofdm->mh_format_err,
201 max_ofdm->mh_format_err);
202 pos += scnprintf(buf + pos, bufsz - pos,
203 fmt_table, "re_acq_main_rssi_sum:",
204 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
205 accum_ofdm->re_acq_main_rssi_sum,
206 delta_ofdm->re_acq_main_rssi_sum,
207 max_ofdm->re_acq_main_rssi_sum);
208
209 pos += scnprintf(buf + pos, bufsz - pos,
210 fmt_header, "Statistics_Rx - CCK:");
211 pos += scnprintf(buf + pos, bufsz - pos,
212 fmt_table, "ina_cnt:",
213 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
214 delta_cck->ina_cnt, max_cck->ina_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 fmt_table, "fina_cnt:",
217 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
218 delta_cck->fina_cnt, max_cck->fina_cnt);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 fmt_table, "plcp_err:",
221 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
222 delta_cck->plcp_err, max_cck->plcp_err);
223 pos += scnprintf(buf + pos, bufsz - pos,
224 fmt_table, "crc32_err:",
225 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
226 delta_cck->crc32_err, max_cck->crc32_err);
227 pos += scnprintf(buf + pos, bufsz - pos,
228 fmt_table, "overrun_err:",
229 le32_to_cpu(cck->overrun_err),
230 accum_cck->overrun_err, delta_cck->overrun_err,
231 max_cck->overrun_err);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 fmt_table, "early_overrun_err:",
234 le32_to_cpu(cck->early_overrun_err),
235 accum_cck->early_overrun_err,
236 delta_cck->early_overrun_err,
237 max_cck->early_overrun_err);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 fmt_table, "crc32_good:",
240 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
241 delta_cck->crc32_good, max_cck->crc32_good);
242 pos += scnprintf(buf + pos, bufsz - pos,
243 fmt_table, "false_alarm_cnt:",
244 le32_to_cpu(cck->false_alarm_cnt),
245 accum_cck->false_alarm_cnt,
246 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 fmt_table, "fina_sync_err_cnt:",
249 le32_to_cpu(cck->fina_sync_err_cnt),
250 accum_cck->fina_sync_err_cnt,
251 delta_cck->fina_sync_err_cnt,
252 max_cck->fina_sync_err_cnt);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 fmt_table, "sfd_timeout:",
255 le32_to_cpu(cck->sfd_timeout),
256 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
257 max_cck->sfd_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 fmt_table, "fina_timeout:",
260 le32_to_cpu(cck->fina_timeout),
261 accum_cck->fina_timeout, delta_cck->fina_timeout,
262 max_cck->fina_timeout);
263 pos += scnprintf(buf + pos, bufsz - pos,
264 fmt_table, "unresponded_rts:",
265 le32_to_cpu(cck->unresponded_rts),
266 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
267 max_cck->unresponded_rts);
268 pos += scnprintf(buf + pos, bufsz - pos,
269 fmt_table, "rxe_frame_lmt_ovrun:",
270 le32_to_cpu(cck->rxe_frame_limit_overrun),
271 accum_cck->rxe_frame_limit_overrun,
272 delta_cck->rxe_frame_limit_overrun,
273 max_cck->rxe_frame_limit_overrun);
274 pos += scnprintf(buf + pos, bufsz - pos,
275 fmt_table, "sent_ack_cnt:",
276 le32_to_cpu(cck->sent_ack_cnt),
277 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
278 max_cck->sent_ack_cnt);
279 pos += scnprintf(buf + pos, bufsz - pos,
280 fmt_table, "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
283 max_cck->sent_cts_cnt);
284 pos += scnprintf(buf + pos, bufsz - pos,
285 fmt_table, "sent_ba_rsp_cnt:",
286 le32_to_cpu(cck->sent_ba_rsp_cnt),
287 accum_cck->sent_ba_rsp_cnt,
288 delta_cck->sent_ba_rsp_cnt,
289 max_cck->sent_ba_rsp_cnt);
290 pos += scnprintf(buf + pos, bufsz - pos,
291 fmt_table, "dsp_self_kill:",
292 le32_to_cpu(cck->dsp_self_kill),
293 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
294 max_cck->dsp_self_kill);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 fmt_table, "mh_format_err:",
297 le32_to_cpu(cck->mh_format_err),
298 accum_cck->mh_format_err, delta_cck->mh_format_err,
299 max_cck->mh_format_err);
300 pos += scnprintf(buf + pos, bufsz - pos,
301 fmt_table, "re_acq_main_rssi_sum:",
302 le32_to_cpu(cck->re_acq_main_rssi_sum),
303 accum_cck->re_acq_main_rssi_sum,
304 delta_cck->re_acq_main_rssi_sum,
305 max_cck->re_acq_main_rssi_sum);
306
307 pos += scnprintf(buf + pos, bufsz - pos,
308 fmt_header, "Statistics_Rx - GENERAL:");
309 pos += scnprintf(buf + pos, bufsz - pos,
310 fmt_table, "bogus_cts:",
311 le32_to_cpu(general->bogus_cts),
312 accum_general->bogus_cts, delta_general->bogus_cts,
313 max_general->bogus_cts);
314 pos += scnprintf(buf + pos, bufsz - pos,
315 fmt_table, "bogus_ack:",
316 le32_to_cpu(general->bogus_ack),
317 accum_general->bogus_ack, delta_general->bogus_ack,
318 max_general->bogus_ack);
319 pos += scnprintf(buf + pos, bufsz - pos,
320 fmt_table, "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 fmt_table, "filtered_frames:",
327 le32_to_cpu(general->filtered_frames),
328 accum_general->filtered_frames,
329 delta_general->filtered_frames,
330 max_general->filtered_frames);
331 pos += scnprintf(buf + pos, bufsz - pos,
332 fmt_table, "non_channel_beacons:",
333 le32_to_cpu(general->non_channel_beacons),
334 accum_general->non_channel_beacons,
335 delta_general->non_channel_beacons,
336 max_general->non_channel_beacons);
337 pos += scnprintf(buf + pos, bufsz - pos,
338 fmt_table, "channel_beacons:",
339 le32_to_cpu(general->channel_beacons),
340 accum_general->channel_beacons,
341 delta_general->channel_beacons,
342 max_general->channel_beacons);
343 pos += scnprintf(buf + pos, bufsz - pos,
344 fmt_table, "num_missed_bcon:",
345 le32_to_cpu(general->num_missed_bcon),
346 accum_general->num_missed_bcon,
347 delta_general->num_missed_bcon,
348 max_general->num_missed_bcon);
349 pos += scnprintf(buf + pos, bufsz - pos,
350 fmt_table, "adc_rx_saturation_time:",
351 le32_to_cpu(general->adc_rx_saturation_time),
352 accum_general->adc_rx_saturation_time,
353 delta_general->adc_rx_saturation_time,
354 max_general->adc_rx_saturation_time);
355 pos += scnprintf(buf + pos, bufsz - pos,
356 fmt_table, "ina_detect_search_tm:",
357 le32_to_cpu(general->ina_detection_search_time),
358 accum_general->ina_detection_search_time,
359 delta_general->ina_detection_search_time,
360 max_general->ina_detection_search_time);
361 pos += scnprintf(buf + pos, bufsz - pos,
362 fmt_table, "beacon_silence_rssi_a:",
363 le32_to_cpu(general->beacon_silence_rssi_a),
364 accum_general->beacon_silence_rssi_a,
365 delta_general->beacon_silence_rssi_a,
366 max_general->beacon_silence_rssi_a);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 fmt_table, "beacon_silence_rssi_b:",
369 le32_to_cpu(general->beacon_silence_rssi_b),
370 accum_general->beacon_silence_rssi_b,
371 delta_general->beacon_silence_rssi_b,
372 max_general->beacon_silence_rssi_b);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 fmt_table, "beacon_silence_rssi_c:",
375 le32_to_cpu(general->beacon_silence_rssi_c),
376 accum_general->beacon_silence_rssi_c,
377 delta_general->beacon_silence_rssi_c,
378 max_general->beacon_silence_rssi_c);
379 pos += scnprintf(buf + pos, bufsz - pos,
380 fmt_table, "interference_data_flag:",
381 le32_to_cpu(general->interference_data_flag),
382 accum_general->interference_data_flag,
383 delta_general->interference_data_flag,
384 max_general->interference_data_flag);
385 pos += scnprintf(buf + pos, bufsz - pos,
386 fmt_table, "channel_load:",
387 le32_to_cpu(general->channel_load),
388 accum_general->channel_load,
389 delta_general->channel_load,
390 max_general->channel_load);
391 pos += scnprintf(buf + pos, bufsz - pos,
392 fmt_table, "dsp_false_alarms:",
393 le32_to_cpu(general->dsp_false_alarms),
394 accum_general->dsp_false_alarms,
395 delta_general->dsp_false_alarms,
396 max_general->dsp_false_alarms);
397 pos += scnprintf(buf + pos, bufsz - pos,
398 fmt_table, "beacon_rssi_a:",
399 le32_to_cpu(general->beacon_rssi_a),
400 accum_general->beacon_rssi_a,
401 delta_general->beacon_rssi_a,
402 max_general->beacon_rssi_a);
403 pos += scnprintf(buf + pos, bufsz - pos,
404 fmt_table, "beacon_rssi_b:",
405 le32_to_cpu(general->beacon_rssi_b),
406 accum_general->beacon_rssi_b,
407 delta_general->beacon_rssi_b,
408 max_general->beacon_rssi_b);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 fmt_table, "beacon_rssi_c:",
411 le32_to_cpu(general->beacon_rssi_c),
412 accum_general->beacon_rssi_c,
413 delta_general->beacon_rssi_c,
414 max_general->beacon_rssi_c);
415 pos += scnprintf(buf + pos, bufsz - pos,
416 fmt_table, "beacon_energy_a:",
417 le32_to_cpu(general->beacon_energy_a),
418 accum_general->beacon_energy_a,
419 delta_general->beacon_energy_a,
420 max_general->beacon_energy_a);
421 pos += scnprintf(buf + pos, bufsz - pos,
422 fmt_table, "beacon_energy_b:",
423 le32_to_cpu(general->beacon_energy_b),
424 accum_general->beacon_energy_b,
425 delta_general->beacon_energy_b,
426 max_general->beacon_energy_b);
427 pos += scnprintf(buf + pos, bufsz - pos,
428 fmt_table, "beacon_energy_c:",
429 le32_to_cpu(general->beacon_energy_c),
430 accum_general->beacon_energy_c,
431 delta_general->beacon_energy_c,
432 max_general->beacon_energy_c);
433
434 pos += scnprintf(buf + pos, bufsz - pos,
435 fmt_header, "Statistics_Rx - OFDM_HT:");
436 pos += scnprintf(buf + pos, bufsz - pos,
437 fmt_table, "plcp_err:",
438 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
439 delta_ht->plcp_err, max_ht->plcp_err);
440 pos += scnprintf(buf + pos, bufsz - pos,
441 fmt_table, "overrun_err:",
442 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
443 delta_ht->overrun_err, max_ht->overrun_err);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 fmt_table, "early_overrun_err:",
446 le32_to_cpu(ht->early_overrun_err),
447 accum_ht->early_overrun_err,
448 delta_ht->early_overrun_err,
449 max_ht->early_overrun_err);
450 pos += scnprintf(buf + pos, bufsz - pos,
451 fmt_table, "crc32_good:",
452 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
453 delta_ht->crc32_good, max_ht->crc32_good);
454 pos += scnprintf(buf + pos, bufsz - pos,
455 fmt_table, "crc32_err:",
456 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
457 delta_ht->crc32_err, max_ht->crc32_err);
458 pos += scnprintf(buf + pos, bufsz - pos,
459 fmt_table, "mh_format_err:",
460 le32_to_cpu(ht->mh_format_err),
461 accum_ht->mh_format_err,
462 delta_ht->mh_format_err, max_ht->mh_format_err);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 fmt_table, "agg_crc32_good:",
465 le32_to_cpu(ht->agg_crc32_good),
466 accum_ht->agg_crc32_good,
467 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
468 pos += scnprintf(buf + pos, bufsz - pos,
469 fmt_table, "agg_mpdu_cnt:",
470 le32_to_cpu(ht->agg_mpdu_cnt),
471 accum_ht->agg_mpdu_cnt,
472 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 fmt_table, "agg_cnt:",
475 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
476 delta_ht->agg_cnt, max_ht->agg_cnt);
477 pos += scnprintf(buf + pos, bufsz - pos,
478 fmt_table, "unsupport_mcs:",
479 le32_to_cpu(ht->unsupport_mcs),
480 accum_ht->unsupport_mcs,
481 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
482
483 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
484 kfree(buf);
485 return ret;
486}
487
488ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
489 char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct iwl_priv *priv = file->private_data;
493 int pos = 0;
494 char *buf;
495 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
496 ssize_t ret;
497 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
498
499 if (!iwl_legacy_is_alive(priv))
500 return -EAGAIN;
501
502 buf = kzalloc(bufsz, GFP_KERNEL);
503 if (!buf) {
504 IWL_ERR(priv, "Cannot allocate buffer\n");
505 return -ENOMEM;
506 }
507
508 /* The statistics information displayed here is based on
509 * the last statistics notification from uCode and
510 * might not reflect the current uCode activity.
511 */
512 tx = &priv->_4965.statistics.tx;
513 accum_tx = &priv->_4965.accum_statistics.tx;
514 delta_tx = &priv->_4965.delta_statistics.tx;
515 max_tx = &priv->_4965.max_delta.tx;
516
517 pos += iwl4965_statistics_flag(priv, buf, bufsz);
518 pos += scnprintf(buf + pos, bufsz - pos,
519 fmt_header, "Statistics_Tx:");
520 pos += scnprintf(buf + pos, bufsz - pos,
521 fmt_table, "preamble:",
522 le32_to_cpu(tx->preamble_cnt),
523 accum_tx->preamble_cnt,
524 delta_tx->preamble_cnt, max_tx->preamble_cnt);
525 pos += scnprintf(buf + pos, bufsz - pos,
526 fmt_table, "rx_detected_cnt:",
527 le32_to_cpu(tx->rx_detected_cnt),
528 accum_tx->rx_detected_cnt,
529 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
530 pos += scnprintf(buf + pos, bufsz - pos,
531 fmt_table, "bt_prio_defer_cnt:",
532 le32_to_cpu(tx->bt_prio_defer_cnt),
533 accum_tx->bt_prio_defer_cnt,
534 delta_tx->bt_prio_defer_cnt,
535 max_tx->bt_prio_defer_cnt);
536 pos += scnprintf(buf + pos, bufsz - pos,
537 fmt_table, "bt_prio_kill_cnt:",
538 le32_to_cpu(tx->bt_prio_kill_cnt),
539 accum_tx->bt_prio_kill_cnt,
540 delta_tx->bt_prio_kill_cnt,
541 max_tx->bt_prio_kill_cnt);
542 pos += scnprintf(buf + pos, bufsz - pos,
543 fmt_table, "few_bytes_cnt:",
544 le32_to_cpu(tx->few_bytes_cnt),
545 accum_tx->few_bytes_cnt,
546 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
547 pos += scnprintf(buf + pos, bufsz - pos,
548 fmt_table, "cts_timeout:",
549 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
550 delta_tx->cts_timeout, max_tx->cts_timeout);
551 pos += scnprintf(buf + pos, bufsz - pos,
552 fmt_table, "ack_timeout:",
553 le32_to_cpu(tx->ack_timeout),
554 accum_tx->ack_timeout,
555 delta_tx->ack_timeout, max_tx->ack_timeout);
556 pos += scnprintf(buf + pos, bufsz - pos,
557 fmt_table, "expected_ack_cnt:",
558 le32_to_cpu(tx->expected_ack_cnt),
559 accum_tx->expected_ack_cnt,
560 delta_tx->expected_ack_cnt,
561 max_tx->expected_ack_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 fmt_table, "actual_ack_cnt:",
564 le32_to_cpu(tx->actual_ack_cnt),
565 accum_tx->actual_ack_cnt,
566 delta_tx->actual_ack_cnt,
567 max_tx->actual_ack_cnt);
568 pos += scnprintf(buf + pos, bufsz - pos,
569 fmt_table, "dump_msdu_cnt:",
570 le32_to_cpu(tx->dump_msdu_cnt),
571 accum_tx->dump_msdu_cnt,
572 delta_tx->dump_msdu_cnt,
573 max_tx->dump_msdu_cnt);
574 pos += scnprintf(buf + pos, bufsz - pos,
575 fmt_table, "abort_nxt_frame_mismatch:",
576 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
577 accum_tx->burst_abort_next_frame_mismatch_cnt,
578 delta_tx->burst_abort_next_frame_mismatch_cnt,
579 max_tx->burst_abort_next_frame_mismatch_cnt);
580 pos += scnprintf(buf + pos, bufsz - pos,
581 fmt_table, "abort_missing_nxt_frame:",
582 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
583 accum_tx->burst_abort_missing_next_frame_cnt,
584 delta_tx->burst_abort_missing_next_frame_cnt,
585 max_tx->burst_abort_missing_next_frame_cnt);
586 pos += scnprintf(buf + pos, bufsz - pos,
587 fmt_table, "cts_timeout_collision:",
588 le32_to_cpu(tx->cts_timeout_collision),
589 accum_tx->cts_timeout_collision,
590 delta_tx->cts_timeout_collision,
591 max_tx->cts_timeout_collision);
592 pos += scnprintf(buf + pos, bufsz - pos,
593 fmt_table, "ack_ba_timeout_collision:",
594 le32_to_cpu(tx->ack_or_ba_timeout_collision),
595 accum_tx->ack_or_ba_timeout_collision,
596 delta_tx->ack_or_ba_timeout_collision,
597 max_tx->ack_or_ba_timeout_collision);
598 pos += scnprintf(buf + pos, bufsz - pos,
599 fmt_table, "agg ba_timeout:",
600 le32_to_cpu(tx->agg.ba_timeout),
601 accum_tx->agg.ba_timeout,
602 delta_tx->agg.ba_timeout,
603 max_tx->agg.ba_timeout);
604 pos += scnprintf(buf + pos, bufsz - pos,
605 fmt_table, "agg ba_resched_frames:",
606 le32_to_cpu(tx->agg.ba_reschedule_frames),
607 accum_tx->agg.ba_reschedule_frames,
608 delta_tx->agg.ba_reschedule_frames,
609 max_tx->agg.ba_reschedule_frames);
610 pos += scnprintf(buf + pos, bufsz - pos,
611 fmt_table, "agg scd_query_agg_frame:",
612 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
613 accum_tx->agg.scd_query_agg_frame_cnt,
614 delta_tx->agg.scd_query_agg_frame_cnt,
615 max_tx->agg.scd_query_agg_frame_cnt);
616 pos += scnprintf(buf + pos, bufsz - pos,
617 fmt_table, "agg scd_query_no_agg:",
618 le32_to_cpu(tx->agg.scd_query_no_agg),
619 accum_tx->agg.scd_query_no_agg,
620 delta_tx->agg.scd_query_no_agg,
621 max_tx->agg.scd_query_no_agg);
622 pos += scnprintf(buf + pos, bufsz - pos,
623 fmt_table, "agg scd_query_agg:",
624 le32_to_cpu(tx->agg.scd_query_agg),
625 accum_tx->agg.scd_query_agg,
626 delta_tx->agg.scd_query_agg,
627 max_tx->agg.scd_query_agg);
628 pos += scnprintf(buf + pos, bufsz - pos,
629 fmt_table, "agg scd_query_mismatch:",
630 le32_to_cpu(tx->agg.scd_query_mismatch),
631 accum_tx->agg.scd_query_mismatch,
632 delta_tx->agg.scd_query_mismatch,
633 max_tx->agg.scd_query_mismatch);
634 pos += scnprintf(buf + pos, bufsz - pos,
635 fmt_table, "agg frame_not_ready:",
636 le32_to_cpu(tx->agg.frame_not_ready),
637 accum_tx->agg.frame_not_ready,
638 delta_tx->agg.frame_not_ready,
639 max_tx->agg.frame_not_ready);
640 pos += scnprintf(buf + pos, bufsz - pos,
641 fmt_table, "agg underrun:",
642 le32_to_cpu(tx->agg.underrun),
643 accum_tx->agg.underrun,
644 delta_tx->agg.underrun, max_tx->agg.underrun);
645 pos += scnprintf(buf + pos, bufsz - pos,
646 fmt_table, "agg bt_prio_kill:",
647 le32_to_cpu(tx->agg.bt_prio_kill),
648 accum_tx->agg.bt_prio_kill,
649 delta_tx->agg.bt_prio_kill,
650 max_tx->agg.bt_prio_kill);
651 pos += scnprintf(buf + pos, bufsz - pos,
652 fmt_table, "agg rx_ba_rsp_cnt:",
653 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
654 accum_tx->agg.rx_ba_rsp_cnt,
655 delta_tx->agg.rx_ba_rsp_cnt,
656 max_tx->agg.rx_ba_rsp_cnt);
657
658 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
659 kfree(buf);
660 return ret;
661}
662
663ssize_t
664iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
665 size_t count, loff_t *ppos)
666{
667 struct iwl_priv *priv = file->private_data;
668 int pos = 0;
669 char *buf;
670 int bufsz = sizeof(struct statistics_general) * 10 + 300;
671 ssize_t ret;
672 struct statistics_general_common *general, *accum_general;
673 struct statistics_general_common *delta_general, *max_general;
674 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
675 struct statistics_div *div, *accum_div, *delta_div, *max_div;
676
677 if (!iwl_legacy_is_alive(priv))
678 return -EAGAIN;
679
680 buf = kzalloc(bufsz, GFP_KERNEL);
681 if (!buf) {
682 IWL_ERR(priv, "Cannot allocate buffer\n");
683 return -ENOMEM;
684 }
685
686 /* The statistics information displayed here is based on
687 * the last statistics notification from uCode and
688 * might not reflect the current uCode activity.
689 */
690 general = &priv->_4965.statistics.general.common;
691 dbg = &priv->_4965.statistics.general.common.dbg;
692 div = &priv->_4965.statistics.general.common.div;
693 accum_general = &priv->_4965.accum_statistics.general.common;
694 accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
695 accum_div = &priv->_4965.accum_statistics.general.common.div;
696 delta_general = &priv->_4965.delta_statistics.general.common;
697 max_general = &priv->_4965.max_delta.general.common;
698 delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
699 max_dbg = &priv->_4965.max_delta.general.common.dbg;
700 delta_div = &priv->_4965.delta_statistics.general.common.div;
701 max_div = &priv->_4965.max_delta.general.common.div;
702
703 pos += iwl4965_statistics_flag(priv, buf, bufsz);
704 pos += scnprintf(buf + pos, bufsz - pos,
705 fmt_header, "Statistics_General:");
706 pos += scnprintf(buf + pos, bufsz - pos,
707 fmt_value, "temperature:",
708 le32_to_cpu(general->temperature));
709 pos += scnprintf(buf + pos, bufsz - pos,
710 fmt_value, "ttl_timestamp:",
711 le32_to_cpu(general->ttl_timestamp));
712 pos += scnprintf(buf + pos, bufsz - pos,
713 fmt_table, "burst_check:",
714 le32_to_cpu(dbg->burst_check),
715 accum_dbg->burst_check,
716 delta_dbg->burst_check, max_dbg->burst_check);
717 pos += scnprintf(buf + pos, bufsz - pos,
718 fmt_table, "burst_count:",
719 le32_to_cpu(dbg->burst_count),
720 accum_dbg->burst_count,
721 delta_dbg->burst_count, max_dbg->burst_count);
722 pos += scnprintf(buf + pos, bufsz - pos,
723 fmt_table, "wait_for_silence_timeout_count:",
724 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
725 accum_dbg->wait_for_silence_timeout_cnt,
726 delta_dbg->wait_for_silence_timeout_cnt,
727 max_dbg->wait_for_silence_timeout_cnt);
728 pos += scnprintf(buf + pos, bufsz - pos,
729 fmt_table, "sleep_time:",
730 le32_to_cpu(general->sleep_time),
731 accum_general->sleep_time,
732 delta_general->sleep_time, max_general->sleep_time);
733 pos += scnprintf(buf + pos, bufsz - pos,
734 fmt_table, "slots_out:",
735 le32_to_cpu(general->slots_out),
736 accum_general->slots_out,
737 delta_general->slots_out, max_general->slots_out);
738 pos += scnprintf(buf + pos, bufsz - pos,
739 fmt_table, "slots_idle:",
740 le32_to_cpu(general->slots_idle),
741 accum_general->slots_idle,
742 delta_general->slots_idle, max_general->slots_idle);
743 pos += scnprintf(buf + pos, bufsz - pos,
744 fmt_table, "tx_on_a:",
745 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
746 delta_div->tx_on_a, max_div->tx_on_a);
747 pos += scnprintf(buf + pos, bufsz - pos,
748 fmt_table, "tx_on_b:",
749 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
750 delta_div->tx_on_b, max_div->tx_on_b);
751 pos += scnprintf(buf + pos, bufsz - pos,
752 fmt_table, "exec_time:",
753 le32_to_cpu(div->exec_time), accum_div->exec_time,
754 delta_div->exec_time, max_div->exec_time);
755 pos += scnprintf(buf + pos, bufsz - pos,
756 fmt_table, "probe_time:",
757 le32_to_cpu(div->probe_time), accum_div->probe_time,
758 delta_div->probe_time, max_div->probe_time);
759 pos += scnprintf(buf + pos, bufsz - pos,
760 fmt_table, "rx_enable_counter:",
761 le32_to_cpu(general->rx_enable_counter),
762 accum_general->rx_enable_counter,
763 delta_general->rx_enable_counter,
764 max_general->rx_enable_counter);
765 pos += scnprintf(buf + pos, bufsz - pos,
766 fmt_table, "num_of_sos_states:",
767 le32_to_cpu(general->num_of_sos_states),
768 accum_general->num_of_sos_states,
769 delta_general->num_of_sos_states,
770 max_general->num_of_sos_states);
771 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
772 kfree(buf);
773 return ret;
774}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
new file mode 100644
index 000000000000..6c8e35361a9e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
@@ -0,0 +1,59 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl4965_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count, loff_t *ppos);
40#else
41static ssize_t
42iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
43 size_t count, loff_t *ppos)
44{
45 return 0;
46}
47static ssize_t
48iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
49 size_t count, loff_t *ppos)
50{
51 return 0;
52}
53static ssize_t
54iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 return 0;
58}
59#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
new file mode 100644
index 000000000000..cb9baab1ff7d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
@@ -0,0 +1,154 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-4965.h"
76#include "iwl-io.h"
77
78/******************************************************************************
79 *
80 * EEPROM related functions
81 *
82******************************************************************************/
83
84/*
85 * The device's EEPROM semaphore prevents conflicts between driver and uCode
86 * when accessing the EEPROM; each access is a series of pulses to/from the
87 * EEPROM chip, not a single event, so even reads could conflict if they
88 * weren't arbitrated by the semaphore.
89 */
90int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
91{
92 u16 count;
93 int ret;
94
95 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
96 /* Request semaphore */
97 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
98 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
99
100 /* See if we got it */
101 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
102 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
103 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
104 EEPROM_SEM_TIMEOUT);
105 if (ret >= 0) {
106 IWL_DEBUG_IO(priv,
107 "Acquired semaphore after %d tries.\n",
108 count+1);
109 return ret;
110 }
111 }
112
113 return ret;
114}
115
116void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
117{
118 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
119 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
120
121}
122
123int iwl4965_eeprom_check_version(struct iwl_priv *priv)
124{
125 u16 eeprom_ver;
126 u16 calib_ver;
127
128 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
129 calib_ver = iwl_legacy_eeprom_query16(priv,
130 EEPROM_4965_CALIB_VERSION_OFFSET);
131
132 if (eeprom_ver < priv->cfg->eeprom_ver ||
133 calib_ver < priv->cfg->eeprom_calib_ver)
134 goto err;
135
136 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
137 eeprom_ver, calib_ver);
138
139 return 0;
140err:
141 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
142 "CALIB=0x%x < 0x%x\n",
143 eeprom_ver, priv->cfg->eeprom_ver,
144 calib_ver, priv->cfg->eeprom_calib_ver);
145 return -EINVAL;
146
147}
148
149void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
150{
151 const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
152 EEPROM_MAC_ADDRESS);
153 memcpy(mac, addr, ETH_ALEN);
154}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
new file mode 100644
index 000000000000..08b189c8472d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -0,0 +1,814 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
65 * Use iwl-commands.h for uCode API definitions.
66 * Use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_4965_hw_h__
70#define __iwl_4965_hw_h__
71
72#include "iwl-fh.h"
73
74/* EEPROM */
75#define IWL4965_EEPROM_IMG_SIZE 1024
76
77/*
78 * uCode queue management definitions ...
79 * The first queue used for block-ack aggregation is #7 (4965 only).
80 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
81 */
82#define IWL49_FIRST_AMPDU_QUEUE 7
83
84/* Sizes and addresses for instruction and data memory (SRAM) in
85 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
86#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
87#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
88
89#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
90#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
91
92#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
93 IWL49_RTC_INST_LOWER_BOUND)
94#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
95 IWL49_RTC_DATA_LOWER_BOUND)
96
97#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
98#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
99
100/* Size of uCode instruction memory in bootstrap state machine */
101#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
102
103static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
104{
105 return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
106 (addr < IWL49_RTC_DATA_UPPER_BOUND);
107}
108
109/********************* START TEMPERATURE *************************************/
110
111/**
112 * 4965 temperature calculation.
113 *
114 * The driver must calculate the device temperature before calculating
115 * a txpower setting (amplifier gain is temperature dependent). The
116 * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
117 * values used for the life of the driver, and one of which (R4) is the
118 * real-time temperature indicator.
119 *
120 * uCode provides all 4 values to the driver via the "initialize alive"
121 * notification (see struct iwl4965_init_alive_resp). After the runtime uCode
122 * image loads, uCode updates the R4 value via statistics notifications
123 * (see STATISTICS_NOTIFICATION), which occur after each received beacon
124 * when associated, or can be requested via REPLY_STATISTICS_CMD.
125 *
126 * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
127 * must sign-extend to 32 bits before applying formula below.
128 *
129 * Formula:
130 *
131 * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
132 *
133 * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
134 * an additional correction, which should be centered around 0 degrees
135 * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
136 * centering the 97/100 correction around 0 degrees K.
137 *
138 * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
139 * temperature with factory-measured temperatures when calculating txpower
140 * settings.
141 */
142#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
143#define TEMPERATURE_CALIB_A_VAL 259
144
145/* Limit range of calculated temperature to be between these Kelvin values */
146#define IWL_TX_POWER_TEMPERATURE_MIN (263)
147#define IWL_TX_POWER_TEMPERATURE_MAX (410)
148
149#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
150 (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
151 ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
152
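/*
 * Editor's sketch (not part of the driver sources): a standalone
 * illustration of the temperature formula documented above.  R1, R2 and R3
 * come from the "initialize alive" notification and R4 from statistics
 * notifications; R4 must already be sign-extended from 23 bits.  The
 * function name and the plain int types are illustrative only.
 */
static inline int example_4965_temperature_kelvin(int r1, int r2, int r3,
						  int r4)
{
	if (r3 == r1)	/* guard against bogus calibration values */
		return -1;

	/* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
	return ((97 * TEMPERATURE_CALIB_A_VAL * (r4 - r2) / (r3 - r1)) / 100) +
		TEMPERATURE_CALIB_KELVIN_OFFSET;
}
/*
 * The result can be range-checked with IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE()
 * and converted to Celsius by subtracting 273.
 */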
153/********************* END TEMPERATURE ***************************************/
154
155/********************* START TXPOWER *****************************************/
156
157/**
158 * 4965 txpower calculations rely on information from three sources:
159 *
160 * 1) EEPROM
161 * 2) "initialize" alive notification
162 * 3) statistics notifications
163 *
164 * EEPROM data consists of:
165 *
166 * 1) Regulatory information (max txpower and channel usage flags) is provided
167 * separately for each channel that can possibly be supported by 4965.
168 * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
169 * (legacy) channels.
170 *
171 * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
172 * for locations in EEPROM.
173 *
174 * 2) Factory txpower calibration information is provided separately for
175 * sub-bands of contiguous channels. 2.4GHz has just one sub-band,
176 * but 5 GHz has several sub-bands.
177 *
178 * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
179 *
180 * See struct iwl4965_eeprom_calib_info (and the tree of structures
181 * contained within it) for format, and struct iwl4965_eeprom for
182 * locations in EEPROM.
183 *
184 * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
185 * consists of:
186 *
187 * 1) Temperature calculation parameters.
188 *
189 * 2) Power supply voltage measurement.
190 *
191 * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
192 *
193 * Statistics notifications deliver:
194 *
195 * 1) Current values for temperature param R4.
196 */
197
198/**
199 * To calculate a txpower setting for a given desired target txpower, channel,
200 * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
201 * support MIMO and transmit diversity), driver must do the following:
202 *
203 * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
204 * Do not exceed regulatory limit; reduce target txpower if necessary.
205 *
206 * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
207 * 2 transmitters will be used simultaneously; driver must reduce the
208 * regulatory limit by 3 dB (half-power) for each transmitter, so the
209 * combined total output of the 2 transmitters is within regulatory limits.
210 *
211 *
212 * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
213 * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
214 * reduce target txpower if necessary.
215 *
216 * Backoff values below are in 1/2 dB units (equivalent to steps in
217 * txpower gain tables):
218 *
219 * OFDM 6 - 36 MBit: 10 steps (5 dB)
220 * OFDM 48 MBit: 15 steps (7.5 dB)
221 * OFDM 54 MBit: 17 steps (8.5 dB)
222 * OFDM 60 MBit: 20 steps (10 dB)
223 * CCK all rates: 10 steps (5 dB)
224 *
225 * Backoff values apply to saturation txpower on a per-transmitter basis;
226 * when using MIMO (2 transmitters), each transmitter uses the same
227 * saturation level provided in EEPROM, and the same backoff values;
228 * no reduction (such as with regulatory txpower limits) is required.
229 *
230 * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
231 * widths and 40 MHz (.11n HT40) channel widths; there is no separate
232 * factory measurement for HT40 channels.
233 *
234 * The result of this step is the final target txpower. The rest of
235 * the steps figure out the proper settings for the device to achieve
236 * that target txpower.
237 *
238 *
239 * 3) Determine (EEPROM) calibration sub band for the target channel, by
240 * comparing against first and last channels in each sub band
241 * (see struct iwl4965_eeprom_calib_subband_info).
242 *
243 *
244 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
245 * referencing the 2 factory-measured (sample) channels within the sub band; see the sketches following this comment block.
246 *
247 * Interpolation is based on difference between target channel's frequency
248 * and the sample channels' frequencies. Since channel numbers are based
249 * on frequency (5 MHz between each channel number), this is equivalent
250 * to interpolating based on channel number differences.
251 *
252 * Note that the sample channels may or may not be the channels at the
253 * edges of the sub band. The target channel may be "outside" of the
254 * span of the sampled channels.
255 *
256 * Driver may choose the pair (for 2 Tx chains) of measurements (see
257 * struct iwl4965_eeprom_calib_ch_info) for which the actual measured
258 * txpower comes closest to the desired txpower. Usually, though,
259 * the middle set of measurements is closest to the regulatory limits,
260 * and is therefore a good choice for all txpower calculations (this
261 * assumes that high accuracy is needed for maximizing legal txpower,
262 * while lower txpower configurations do not need as much accuracy).
263 *
264 * Driver should interpolate both members of the chosen measurement pair,
265 * i.e. for both Tx chains (radio transmitters), unless the driver knows
266 * that only one of the chains will be used (e.g. only one tx antenna
267 * connected, but this should be unusual). The rate scaling algorithm
268 * switches antennas to find best performance, so both Tx chains will
269 * be used (although only one at a time) even for non-MIMO transmissions.
270 *
271 * Driver should interpolate factory values for temperature, gain table
272 * index, and actual power. The power amplifier detector values are
273 * not used by the driver.
274 *
275 * Sanity check: If the target channel happens to be one of the sample
276 * channels, the results should agree with the sample channel's
277 * measurements!
278 *
279 *
280 * 5) Find difference between desired txpower and (interpolated)
281 * factory-measured txpower. Using (interpolated) factory gain table index
282 * (shown elsewhere) as a starting point, adjust this index lower to
283 * increase txpower, or higher to decrease txpower, until the target
284 * txpower is reached. Each step in the gain table is 1/2 dB.
285 *
286 * For example, if factory measured txpower is 16 dBm, and target txpower
287 * is 13 dBm, add 6 steps to the factory gain index to reduce txpower
288 * by 3 dB.
289 *
290 *
291 * 6) Find difference between current device temperature and (interpolated)
292 * factory-measured temperature for sub-band. Factory values are in
293 * degrees Celsius. To calculate current temperature, see comments for
294 * "4965 temperature calculation".
295 *
296 * If current temperature is higher than factory temperature, driver must
297 * increase gain (lower gain table index), and vice versa.
298 *
299 * Temperature affects gain differently for different channels:
300 *
301 * 2.4 GHz all channels: 3.5 degrees per half-dB step
302 * 5 GHz channels 34-43: 4.5 degrees per half-dB step
303 * 5 GHz channels >= 44: 4.0 degrees per half-dB step
304 *
305 * NOTE: Temperature can increase rapidly when transmitting, especially
306 * with heavy traffic at high txpowers. Driver should update
307 * temperature calculations often under these conditions to
308 * maintain strong txpower in the face of rising temperature.
309 *
310 *
311 * 7) Find difference between current power supply voltage indicator
312 * (from "initialize alive") and factory-measured power supply voltage
313 * indicator (EEPROM).
314 *
315 * If the current voltage is higher (indicator is lower) than factory
316 * voltage, gain should be reduced (gain table index increased) by:
317 *
318 * (eeprom - current) / 7
319 *
320 * If the current voltage is lower (indicator is higher) than factory
321 * voltage, gain should be increased (gain table index decreased) by:
322 *
323 * 2 * (current - eeprom) / 7
324 *
325 * If number of index steps in either direction turns out to be > 2,
326 * something is wrong ... just use 0.
327 *
328 * NOTE: Voltage compensation is independent of band/channel.
329 *
330 * NOTE: "Initialize" uCode measures current voltage, which is assumed
331 * to be constant after this initial measurement. Voltage
332 * compensation for txpower (number of steps in gain table)
333 * may be calculated once and used until the next uCode bootload.
334 *
335 *
336 * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
337 * adjust txpower for each transmitter chain, so txpower is balanced
338 * between the two chains. There are 5 pairs of tx_atten[group][chain]
339 * values in "initialize alive", one pair for each of 5 channel ranges:
340 *
341 * Group 0: 5 GHz channel 34-43
342 * Group 1: 5 GHz channel 44-70
343 * Group 2: 5 GHz channel 71-124
344 * Group 3: 5 GHz channel 125-200
345 * Group 4: 2.4 GHz all channels
346 *
347 * Add the tx_atten[group][chain] value to the index for the target chain.
348 * The values are signed, but are in pairs of 0 and a non-negative number,
349 * so as to reduce gain (if necessary) of the "hotter" channel. This
350 * avoids any need to double-check for regulatory compliance after
351 * this step.
352 *
353 *
354 * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
355 * value to the index:
356 *
357 * Hardware rev B: 9 steps (4.5 dB)
358 * Hardware rev C: 5 steps (2.5 dB)
359 *
360 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
361 * bits [3:2], 1 = B, 2 = C.
362 *
363 * NOTE: This compensation is in addition to any saturation backoff that
364 * might have been applied in an earlier step.
365 *
366 *
367 * 10) Select the gain table, based on band (2.4 vs 5 GHz).
368 *
369 * Limit the adjusted index to stay within the table!
370 *
371 *
372 * 11) Read gain table entries for DSP and radio gain, place into appropriate
373 * location(s) in command (struct iwl4965_txpowertable_cmd).
374 */
375
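/*
 * Editor's sketches (not part of the driver sources) of steps 4 and 5
 * above.  Names and parameters are illustrative; the real driver
 * interpolates whole factory measurement structures (temperature, gain
 * index, power) per Tx chain rather than a single value.
 */

/*
 * Step 4: linear interpolation between the two factory-measured sample
 * channels of a sub band.  Channel numbers stand in for frequencies
 * (5 MHz per channel number); a target channel outside [ch1, ch2] is
 * simply extrapolated.
 */
static inline int example_interpolate(int ch, int ch1, int val1,
				      int ch2, int val2)
{
	if (ch2 == ch1)		/* degenerate sub band: one sample only */
		return val1;

	return val1 + ((val2 - val1) * (ch - ch1)) / (ch2 - ch1);
}

/*
 * Step 5: each gain table step is 1/2 dB, so the index moves by two steps
 * per dB of difference between factory-measured and target txpower
 * (higher index = lower power).  E.g. factory 16 dBm, target 13 dBm:
 * index increases by 6.
 */
static inline int example_adjust_gain_index(int factory_index,
					    int factory_dbm, int target_dbm)
{
	return factory_index + 2 * (factory_dbm - target_dbm);
}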
376/**
377 * When MIMO is used (2 transmitters operating simultaneously), driver should
378 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
379 * for the device. That is, use half power for each transmitter, so total
380 * txpower is within regulatory limits.
381 *
382 * The value "6" represents the number of steps in the gain table needed to reduce power by 3 dB.
383 * Each step is 1/2 dB.
384 */
385#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
386
387/**
388 * CCK gain compensation.
389 *
390 * When calculating txpowers for CCK, after making sure that the target power
391 * is within regulatory and saturation limits, driver must additionally
392 * back off gain by adding these values to the gain table index.
393 *
394 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
395 * bits [3:2], 1 = B, 2 = C.
396 */
397#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
398#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
399
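/*
 * Editor's sketch (not part of the driver sources): selecting the CCK
 * compensation from the hardware revision, per the note above.  The
 * hw_rev_wa argument stands for a value already read from
 * CSR_HW_REV_WA_REG; reading the register is not shown.
 */
static inline int example_cck_compensation(unsigned int hw_rev_wa)
{
	unsigned int rev = (hw_rev_wa >> 2) & 0x3;	/* bits [3:2] */

	if (rev == 1)	/* hardware rev B */
		return IWL_TX_POWER_CCK_COMPENSATION_B_STEP;
	return IWL_TX_POWER_CCK_COMPENSATION_C_STEP;	/* rev C, and default */
}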
400/*
401 * 4965 power supply voltage compensation for txpower
402 */
403#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
404
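/*
 * Editor's sketch (not part of the driver sources): gain table index
 * adjustment for power supply voltage, following the rules in step 7 of
 * the txpower comment block above.  "eeprom_ind" and "current_ind" are
 * the factory and "initialize alive" voltage indicator values (lower
 * indicator = higher voltage).  A positive result means increase the
 * gain table index (reduce gain); a negative result means decrease it.
 */
static inline int example_voltage_compensation(int eeprom_ind, int current_ind)
{
	int steps;

	if (current_ind <= eeprom_ind)		/* current voltage >= factory */
		steps = (eeprom_ind - current_ind) /
			TX_POWER_IWL_VOLTAGE_CODES_PER_03V;
	else					/* current voltage < factory */
		steps = -2 * (current_ind - eeprom_ind) /
			TX_POWER_IWL_VOLTAGE_CODES_PER_03V;

	/* More than 2 steps either way indicates bad data; just use 0. */
	if (steps < -2 || steps > 2)
		steps = 0;

	return steps;
}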
405/**
406 * Gain tables.
407 *
408 * The following tables contain pair of values for setting txpower, i.e.
409 * gain settings for the output of the device's digital signal processor (DSP),
410 * and for the analog gain structure of the transmitter.
411 *
412 * Each entry in the gain tables represents a step of 1/2 dB. Note that these
413 * are *relative* steps, not indications of absolute output power. Output
414 * power varies with temperature, voltage, and channel frequency, and also
415 * requires consideration of average power (to satisfy regulatory constraints),
416 * and peak power (to avoid distortion of the output signal).
417 *
418 * Each entry contains two values:
419 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
420 * linear value that multiplies the output of the digital signal processor,
421 * before being sent to the analog radio.
422 * 2) Radio gain. This sets the analog gain of the radio Tx path.
423 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
424 *
425 * EEPROM contains factory calibration data for txpower. This maps actual
426 * measured txpower levels to gain settings in the "well known" tables
427 * below ("well-known" means here that both factory calibration *and* the
428 * driver work with the same table).
429 *
430 * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
431 * has an extension (into negative indexes), in case the driver needs to
432 * boost power setting for high device temperatures (higher than would be
433 * present during factory calibration). A 5 GHz EEPROM index of "40"
434 * corresponds to the 49th entry in the table used by the driver.
435 */
436#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
437#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
438
439/**
440 * 2.4 GHz gain table
441 *
442 * Index Dsp gain Radio gain
443 * 0 110 0x3f (highest gain)
444 * 1 104 0x3f
445 * 2 98 0x3f
446 * 3 110 0x3e
447 * 4 104 0x3e
448 * 5 98 0x3e
449 * 6 110 0x3d
450 * 7 104 0x3d
451 * 8 98 0x3d
452 * 9 110 0x3c
453 * 10 104 0x3c
454 * 11 98 0x3c
455 * 12 110 0x3b
456 * 13 104 0x3b
457 * 14 98 0x3b
458 * 15 110 0x3a
459 * 16 104 0x3a
460 * 17 98 0x3a
461 * 18 110 0x39
462 * 19 104 0x39
463 * 20 98 0x39
464 * 21 110 0x38
465 * 22 104 0x38
466 * 23 98 0x38
467 * 24 110 0x37
468 * 25 104 0x37
469 * 26 98 0x37
470 * 27 110 0x36
471 * 28 104 0x36
472 * 29 98 0x36
473 * 30 110 0x35
474 * 31 104 0x35
475 * 32 98 0x35
476 * 33 110 0x34
477 * 34 104 0x34
478 * 35 98 0x34
479 * 36 110 0x33
480 * 37 104 0x33
481 * 38 98 0x33
482 * 39 110 0x32
483 * 40 104 0x32
484 * 41 98 0x32
485 * 42 110 0x31
486 * 43 104 0x31
487 * 44 98 0x31
488 * 45 110 0x30
489 * 46 104 0x30
490 * 47 98 0x30
491 * 48 110 0x6
492 * 49 104 0x6
493 * 50 98 0x6
494 * 51 110 0x5
495 * 52 104 0x5
496 * 53 98 0x5
497 * 54 110 0x4
498 * 55 104 0x4
499 * 56 98 0x4
500 * 57 110 0x3
501 * 58 104 0x3
502 * 59 98 0x3
503 * 60 110 0x2
504 * 61 104 0x2
505 * 62 98 0x2
506 * 63 110 0x1
507 * 64 104 0x1
508 * 65 98 0x1
509 * 66 110 0x0
510 * 67 104 0x0
511 * 68 98 0x0
512 * 69 97 0
513 * 70 96 0
514 * 71 95 0
515 * 72 94 0
516 * 73 93 0
517 * 74 92 0
518 * 75 91 0
519 * 76 90 0
520 * 77 89 0
521 * 78 88 0
522 * 79 87 0
523 * 80 86 0
524 * 81 85 0
525 * 82 84 0
526 * 83 83 0
527 * 84 82 0
528 * 85 81 0
529 * 86 80 0
530 * 87 79 0
531 * 88 78 0
532 * 89 77 0
533 * 90 76 0
534 * 91 75 0
535 * 92 74 0
536 * 93 73 0
537 * 94 72 0
538 * 95 71 0
539 * 96 70 0
540 * 97 69 0
541 * 98 68 0
542 */
543
544/**
545 * 5 GHz gain table
546 *
547 * Index Dsp gain Radio gain
548 * -9 123 0x3F (highest gain)
549 * -8 117 0x3F
550 * -7 110 0x3F
551 * -6 104 0x3F
552 * -5 98 0x3F
553 * -4 110 0x3E
554 * -3 104 0x3E
555 * -2 98 0x3E
556 * -1 110 0x3D
557 * 0 104 0x3D
558 * 1 98 0x3D
559 * 2 110 0x3C
560 * 3 104 0x3C
561 * 4 98 0x3C
562 * 5 110 0x3B
563 * 6 104 0x3B
564 * 7 98 0x3B
565 * 8 110 0x3A
566 * 9 104 0x3A
567 * 10 98 0x3A
568 * 11 110 0x39
569 * 12 104 0x39
570 * 13 98 0x39
571 * 14 110 0x38
572 * 15 104 0x38
573 * 16 98 0x38
574 * 17 110 0x37
575 * 18 104 0x37
576 * 19 98 0x37
577 * 20 110 0x36
578 * 21 104 0x36
579 * 22 98 0x36
580 * 23 110 0x35
581 * 24 104 0x35
582 * 25 98 0x35
583 * 26 110 0x34
584 * 27 104 0x34
585 * 28 98 0x34
586 * 29 110 0x33
587 * 30 104 0x33
588 * 31 98 0x33
589 * 32 110 0x32
590 * 33 104 0x32
591 * 34 98 0x32
592 * 35 110 0x31
593 * 36 104 0x31
594 * 37 98 0x31
595 * 38 110 0x30
596 * 39 104 0x30
597 * 40 98 0x30
598 * 41 110 0x25
599 * 42 104 0x25
600 * 43 98 0x25
601 * 44 110 0x24
602 * 45 104 0x24
603 * 46 98 0x24
604 * 47 110 0x23
605 * 48 104 0x23
606 * 49 98 0x23
607 * 50 110 0x22
608 * 51 104 0x18
609 * 52 98 0x18
610 * 53 110 0x17
611 * 54 104 0x17
612 * 55 98 0x17
613 * 56 110 0x16
614 * 57 104 0x16
615 * 58 98 0x16
616 * 59 110 0x15
617 * 60 104 0x15
618 * 61 98 0x15
619 * 62 110 0x14
620 * 63 104 0x14
621 * 64 98 0x14
622 * 65 110 0x13
623 * 66 104 0x13
624 * 67 98 0x13
625 * 68 110 0x12
626 * 69 104 0x08
627 * 70 98 0x08
628 * 71 110 0x07
629 * 72 104 0x07
630 * 73 98 0x07
631 * 74 110 0x06
632 * 75 104 0x06
633 * 76 98 0x06
634 * 77 110 0x05
635 * 78 104 0x05
636 * 79 98 0x05
637 * 80 110 0x04
638 * 81 104 0x04
639 * 82 98 0x04
640 * 83 110 0x03
641 * 84 104 0x03
642 * 85 98 0x03
643 * 86 110 0x02
644 * 87 104 0x02
645 * 88 98 0x02
646 * 89 110 0x01
647 * 90 104 0x01
648 * 91 98 0x01
649 * 92 110 0x00
650 * 93 104 0x00
651 * 94 98 0x00
652 * 95 93 0x00
653 * 96 88 0x00
654 * 97 83 0x00
655 * 98 78 0x00
656 */
657
658
659/**
660 * Sanity checks and default values for EEPROM regulatory levels.
661 * If EEPROM values fall outside MIN/MAX range, use default values.
662 *
663 * Regulatory limits refer to the maximum average txpower allowed by
664 * regulatory agencies in the geographies in which the device is meant
665 * to be operated. These limits are SKU-specific (i.e. geography-specific),
666 * and channel-specific; each channel has an individual regulatory limit
667 * listed in the EEPROM.
668 *
669 * Units are in half-dBm (i.e. "34" means 17 dBm).
670 */
671#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
672#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
673#define IWL_TX_POWER_REGULATORY_MIN (0)
674#define IWL_TX_POWER_REGULATORY_MAX (34)
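/*
 * Illustrative sketch only -- not part of the original header. It shows how an
 * EEPROM regulatory value could be range-checked against the limits above and
 * replaced by the per-band default when out of range. The function name and
 * calling context are hypothetical.
 */
static inline int iwl4965_sane_regulatory(int eeprom_val, bool is_52_band)
{
	if (eeprom_val < IWL_TX_POWER_REGULATORY_MIN ||
	    eeprom_val > IWL_TX_POWER_REGULATORY_MAX)
		return is_52_band ? IWL_TX_POWER_DEFAULT_REGULATORY_52 :
				    IWL_TX_POWER_DEFAULT_REGULATORY_24;
	return eeprom_val;	/* value is in half-dBm, e.g. 34 -> 17 dBm */
}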
675
676/**
677 * Sanity checks and default values for EEPROM saturation levels.
678 * If EEPROM values fall outside MIN/MAX range, use default values.
679 *
680 * Saturation is the highest level that the output power amplifier can produce
681 * without significant clipping distortion. This is a "peak" power level.
682 * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
683 * require differing amounts of backoff, relative to their average power output,
684 * in order to avoid clipping distortion.
685 *
 686 * The driver must make sure that it violates neither the saturation limit
 687 * nor the regulatory limit when calculating Tx power settings for the
 688 * various rates.
689 *
690 * Units are in half-dBm (i.e. "38" means 19 dBm).
691 */
692#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
693#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
694#define IWL_TX_POWER_SATURATION_MIN (20)
695#define IWL_TX_POWER_SATURATION_MAX (50)
696
697/**
698 * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
699 * and thermal Txpower calibration.
700 *
701 * When calculating txpower, driver must compensate for current device
702 * temperature; higher temperature requires higher gain. Driver must calculate
703 * current temperature (see "4965 temperature calculation"), then compare vs.
704 * factory calibration temperature in EEPROM; if current temperature is higher
705 * than factory temperature, driver must *increase* gain by proportions shown
706 * in table below. If current temperature is lower than factory, driver must
707 * *decrease* gain.
708 *
709 * Different frequency ranges require different compensation, as shown below.
710 */
711/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
712#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
713#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
714
715/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
716#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
717#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
718
719/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
720#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
721#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
722
723/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
724#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
725#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
726
727/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
728#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
729#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
730
731enum {
732 CALIB_CH_GROUP_1 = 0,
733 CALIB_CH_GROUP_2 = 1,
734 CALIB_CH_GROUP_3 = 2,
735 CALIB_CH_GROUP_4 = 3,
736 CALIB_CH_GROUP_5 = 4,
737 CALIB_CH_GROUP_MAX
738};
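/*
 * Illustrative sketch only -- not part of the original header. It shows how a
 * channel number could be mapped onto the calibration groups defined above;
 * the real driver does a similar lookup in its txpower code. The helper name
 * is hypothetical.
 */
static inline int iwl4965_calib_group_for_channel(u16 ch, bool is_2_4_ghz)
{
	if (is_2_4_ghz)
		return CALIB_CH_GROUP_5;	/* all 2.4 GHz channels */
	if (ch >= CALIB_IWL_TX_ATTEN_GR1_FCH && ch <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;	/* 5.2 GHz, ch 34-43 */
	if (ch >= CALIB_IWL_TX_ATTEN_GR2_FCH && ch <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;	/* 5.3 GHz, ch 44-70 */
	if (ch >= CALIB_IWL_TX_ATTEN_GR3_FCH && ch <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;	/* 5.5 GHz, ch 71-124 */
	if (ch >= CALIB_IWL_TX_ATTEN_GR4_FCH && ch <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;	/* 5.7 GHz, ch 125-200 */
	return -1;			/* channel outside any calibration group */
}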
739
740/********************* END TXPOWER *****************************************/
741
742
743/**
744 * Tx/Rx Queues
745 *
746 * Most communication between driver and 4965 is via queues of data buffers.
747 * For example, all commands that the driver issues to device's embedded
748 * controller (uCode) are via the command queue (one of the Tx queues). All
749 * uCode command responses/replies/notifications, including Rx frames, are
750 * conveyed from uCode to driver via the Rx queue.
751 *
752 * Most support for these queues, including handshake support, resides in
753 * structures in host DRAM, shared between the driver and the device. When
754 * allocating this memory, the driver must make sure that data written by
755 * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
756 * cache memory), so DRAM and cache are consistent, and the device can
757 * immediately see changes made by the driver.
758 *
759 * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
760 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
761 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
762 */
763#define IWL49_NUM_FIFOS 7
764#define IWL49_CMD_FIFO_NUM 4
765#define IWL49_NUM_QUEUES 16
766#define IWL49_NUM_AMPDU_QUEUES 8
767
768
769/**
770 * struct iwl4965_schedq_bc_tbl
771 *
772 * Byte Count table
773 *
774 * Each Tx queue uses a byte-count table containing 320 entries:
775 * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
776 * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
777 * max Tx window is 64 TFDs).
778 *
779 * When driver sets up a new TFD, it must also enter the total byte count
780 * of the frame to be transmitted into the corresponding entry in the byte
781 * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
782 * must duplicate the byte count entry in corresponding index 256-319.
783 *
784 * padding puts each byte count table on a 1024-byte boundary;
785 * 4965 assumes tables are separated by 1024 bytes.
786 */
787struct iwl4965_scd_bc_tbl {
788 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
790} __packed;
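/*
 * Illustrative sketch only -- not part of the original header. It shows the
 * byte-count duplication rule described above: entries for TFD indexes 0-63
 * are mirrored into the wrap-around region at indexes 256-319. The helper
 * name is hypothetical.
 */
static inline void iwl4965_scd_bc_set(struct iwl4965_scd_bc_tbl *bc_tbl,
				      int tfd_idx, u16 byte_cnt)
{
	bc_tbl->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);
	if (tfd_idx < 64)
		bc_tbl->tfd_offset[tfd_idx + 256] = cpu_to_le16(byte_cnt);
}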
791
792
793#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
794
795/* RSSI to dBm */
796#define IWL4965_RSSI_OFFSET 44
797
798/* PCI registers */
799#define PCI_CFG_RETRY_TIMEOUT 0x041
800
801/* PCI register values */
802#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
803#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
804
805#define IWL4965_DEFAULT_TX_RETRY 15
806
807/* Limit range of txpower output target to be between these values */
808#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
809
810/* EEPROM */
811#define IWL4965_FIRST_AMPDU_QUEUE 10
812
813
814#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
new file mode 100644
index 000000000000..26d324e30692
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
@@ -0,0 +1,74 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-4965-led.h"
45
46/* Send led command */
47static int
48iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
49{
50 struct iwl_host_cmd cmd = {
51 .id = REPLY_LEDS_CMD,
52 .len = sizeof(struct iwl_led_cmd),
53 .data = led_cmd,
54 .flags = CMD_ASYNC,
55 .callback = NULL,
56 };
57 u32 reg;
58
59 reg = iwl_read32(priv, CSR_LED_REG);
60 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
61 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
62
63 return iwl_legacy_send_cmd(priv, &cmd);
64}
65
66/* Turn the LED register on */
67void iwl4965_led_enable(struct iwl_priv *priv)
68{
69 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
70}
71
72const struct iwl_led_ops iwl4965_led_ops = {
73 .cmd = iwl4965_send_led_cmd,
74};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
new file mode 100644
index 000000000000..5ed3615fc338
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
@@ -0,0 +1,33 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_4965_led_h__
28#define __iwl_4965_led_h__
29
30extern const struct iwl_led_ops iwl4965_led_ops;
31void iwl4965_led_enable(struct iwl_priv *priv);
32
33#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
new file mode 100644
index 000000000000..5a8a3cce27bc
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -0,0 +1,1260 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-sta.h"
42
43void iwl4965_check_abort_status(struct iwl_priv *priv,
44 u8 frame_count, u32 status)
45{
46 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
47 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
48 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
49 queue_work(priv->workqueue, &priv->tx_flush);
50 }
51}
52
53/*
54 * EEPROM
55 */
56struct iwl_mod_params iwl4965_mod_params = {
57 .amsdu_size_8K = 1,
58 .restart_fw = 1,
59 /* the rest are 0 by default */
60};
61
62void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
63{
64 unsigned long flags;
65 int i;
66 spin_lock_irqsave(&rxq->lock, flags);
67 INIT_LIST_HEAD(&rxq->rx_free);
68 INIT_LIST_HEAD(&rxq->rx_used);
69 /* Fill the rx_used queue with _all_ of the Rx buffers */
70 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
71 /* In the reset function, these buffers may have been allocated
72 * to an SKB, so we need to unmap and free potential storage */
73 if (rxq->pool[i].page != NULL) {
74 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
75 PAGE_SIZE << priv->hw_params.rx_page_order,
76 PCI_DMA_FROMDEVICE);
77 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
78 rxq->pool[i].page = NULL;
79 }
80 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
81 }
82
83 for (i = 0; i < RX_QUEUE_SIZE; i++)
84 rxq->queue[i] = NULL;
85
86 /* Set us so that we have processed and used all buffers, but have
87 * not restocked the Rx queue with fresh buffers */
88 rxq->read = rxq->write = 0;
89 rxq->write_actual = 0;
90 rxq->free_count = 0;
91 spin_unlock_irqrestore(&rxq->lock, flags);
92}
93
94int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
95{
96 u32 rb_size;
97 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
98 u32 rb_timeout = 0;
99
100 if (priv->cfg->mod_params->amsdu_size_8K)
101 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
102 else
103 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
104
105 /* Stop Rx DMA */
106 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
107
108 /* Reset driver's Rx queue write index */
109 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
110
111 /* Tell device where to find RBD circular buffer in DRAM */
112 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
113 (u32)(rxq->bd_dma >> 8));
114
115 /* Tell device where in DRAM to update its Rx status */
116 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
117 rxq->rb_stts_dma >> 4);
118
119 /* Enable Rx DMA
120 * Direct rx interrupts to hosts
121 * Rx buffer size 4 or 8k
122 * RB timeout 0x10
123 * 256 RBDs
124 */
125 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
126 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
127 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
128 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
129 rb_size|
130 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
131 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
132
133 /* Set interrupt coalescing timer to default (2048 usecs) */
134 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
135
136 return 0;
137}
138
139static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
140{
141/*
142 * (for documentation purposes)
143 * to set power to V_AUX, do:
144
145 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
146 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
147 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
148 ~APMG_PS_CTRL_MSK_PWR_SRC);
149 */
150
151 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
152 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
153 ~APMG_PS_CTRL_MSK_PWR_SRC);
154}
155
156int iwl4965_hw_nic_init(struct iwl_priv *priv)
157{
158 unsigned long flags;
159 struct iwl_rx_queue *rxq = &priv->rxq;
160 int ret;
161
162 /* nic_init */
163 spin_lock_irqsave(&priv->lock, flags);
164 priv->cfg->ops->lib->apm_ops.init(priv);
165
166 /* Set interrupt coalescing calibration timer to default (512 usecs) */
167 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
168
169 spin_unlock_irqrestore(&priv->lock, flags);
170
171 iwl4965_set_pwr_vmain(priv);
172
173 priv->cfg->ops->lib->apm_ops.config(priv);
174
175 /* Allocate the RX queue, or reset if it is already allocated */
176 if (!rxq->bd) {
177 ret = iwl_legacy_rx_queue_alloc(priv);
178 if (ret) {
179 IWL_ERR(priv, "Unable to initialize Rx queue\n");
180 return -ENOMEM;
181 }
182 } else
183 iwl4965_rx_queue_reset(priv, rxq);
184
185 iwl4965_rx_replenish(priv);
186
187 iwl4965_rx_init(priv, rxq);
188
189 spin_lock_irqsave(&priv->lock, flags);
190
191 rxq->need_update = 1;
192 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
193
194 spin_unlock_irqrestore(&priv->lock, flags);
195
196 /* Allocate or reset and init all Tx and Command queues */
197 if (!priv->txq) {
198 ret = iwl4965_txq_ctx_alloc(priv);
199 if (ret)
200 return ret;
201 } else
202 iwl4965_txq_ctx_reset(priv);
203
204 set_bit(STATUS_INIT, &priv->status);
205
206 return 0;
207}
208
209/**
210 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
211 */
212static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
213 dma_addr_t dma_addr)
214{
215 return cpu_to_le32((u32)(dma_addr >> 8));
216}
217
218/**
219 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
220 *
221 * If there are slots in the RX queue that need to be restocked,
222 * and we have free pre-allocated buffers, fill the ranks as much
223 * as we can, pulling from rx_free.
224 *
225 * This moves the 'write' index forward to catch up with 'processed', and
226 * also updates the memory address in the firmware to reference the new
227 * target buffer.
228 */
229void iwl4965_rx_queue_restock(struct iwl_priv *priv)
230{
231 struct iwl_rx_queue *rxq = &priv->rxq;
232 struct list_head *element;
233 struct iwl_rx_mem_buffer *rxb;
234 unsigned long flags;
235
236 spin_lock_irqsave(&rxq->lock, flags);
237 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
238 /* The overwritten rxb must be a used one */
239 rxb = rxq->queue[rxq->write];
240 BUG_ON(rxb && rxb->page);
241
242 /* Get next free Rx buffer, remove from free list */
243 element = rxq->rx_free.next;
244 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
245 list_del(element);
246
247 /* Point to Rx buffer via next RBD in circular buffer */
248 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
249 rxb->page_dma);
250 rxq->queue[rxq->write] = rxb;
251 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
252 rxq->free_count--;
253 }
254 spin_unlock_irqrestore(&rxq->lock, flags);
255 /* If the pre-allocated buffer pool is dropping low, schedule to
256 * refill it */
257 if (rxq->free_count <= RX_LOW_WATERMARK)
258 queue_work(priv->workqueue, &priv->rx_replenish);
259
260
261 /* If we've added more space for the firmware to place data, tell it.
262 * Increment device's write pointer in multiples of 8. */
263 if (rxq->write_actual != (rxq->write & ~0x7)) {
264 spin_lock_irqsave(&rxq->lock, flags);
265 rxq->need_update = 1;
266 spin_unlock_irqrestore(&rxq->lock, flags);
267 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
268 }
269}
270
271/**
272 * iwl4965_rx_allocate - Move all used buffers from rx_used to rx_free
273 *
274 * A receive page is allocated for each slot moved to rx_free.
275 *
276 * Callers then restock the Rx queue via iwl4965_rx_queue_restock();
277 * this runs as a scheduled work item (except during initialization).
278 */
279static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
280{
281 struct iwl_rx_queue *rxq = &priv->rxq;
282 struct list_head *element;
283 struct iwl_rx_mem_buffer *rxb;
284 struct page *page;
285 unsigned long flags;
286 gfp_t gfp_mask = priority;
287
288 while (1) {
289 spin_lock_irqsave(&rxq->lock, flags);
290 if (list_empty(&rxq->rx_used)) {
291 spin_unlock_irqrestore(&rxq->lock, flags);
292 return;
293 }
294 spin_unlock_irqrestore(&rxq->lock, flags);
295
296 if (rxq->free_count > RX_LOW_WATERMARK)
297 gfp_mask |= __GFP_NOWARN;
298
299 if (priv->hw_params.rx_page_order > 0)
300 gfp_mask |= __GFP_COMP;
301
302 /* Alloc a new receive buffer */
303 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
304 if (!page) {
305 if (net_ratelimit())
306 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
307 "order: %d\n",
308 priv->hw_params.rx_page_order);
309
310 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
311 net_ratelimit())
312 IWL_CRIT(priv,
313 "Failed to alloc_pages with %s. "
314 "Only %u free buffers remaining.\n",
315 priority == GFP_ATOMIC ?
316 "GFP_ATOMIC" : "GFP_KERNEL",
317 rxq->free_count);
318 /* We don't reschedule replenish work here -- we will
319 * call the restock method and if it still needs
320 * more buffers it will schedule replenish */
321 return;
322 }
323
324 spin_lock_irqsave(&rxq->lock, flags);
325
326 if (list_empty(&rxq->rx_used)) {
327 spin_unlock_irqrestore(&rxq->lock, flags);
328 __free_pages(page, priv->hw_params.rx_page_order);
329 return;
330 }
331 element = rxq->rx_used.next;
332 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
333 list_del(element);
334
335 spin_unlock_irqrestore(&rxq->lock, flags);
336
337 BUG_ON(rxb->page);
338 rxb->page = page;
339 /* Get physical address of the RB */
340 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
341 PAGE_SIZE << priv->hw_params.rx_page_order,
342 PCI_DMA_FROMDEVICE);
343 /* dma address must be no more than 36 bits */
344 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
345 /* and also 256 byte aligned! */
346 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
347
348 spin_lock_irqsave(&rxq->lock, flags);
349
350 list_add_tail(&rxb->list, &rxq->rx_free);
351 rxq->free_count++;
352 priv->alloc_rxb_page++;
353
354 spin_unlock_irqrestore(&rxq->lock, flags);
355 }
356}
357
358void iwl4965_rx_replenish(struct iwl_priv *priv)
359{
360 unsigned long flags;
361
362 iwl4965_rx_allocate(priv, GFP_KERNEL);
363
364 spin_lock_irqsave(&priv->lock, flags);
365 iwl4965_rx_queue_restock(priv);
366 spin_unlock_irqrestore(&priv->lock, flags);
367}
368
369void iwl4965_rx_replenish_now(struct iwl_priv *priv)
370{
371 iwl4965_rx_allocate(priv, GFP_ATOMIC);
372
373 iwl4965_rx_queue_restock(priv);
374}
375
376/* Assumes that the page field of the buffers in 'pool' is kept accurate.
377 * If a page has been detached, the pool entry must have its page set to NULL.
378 * This free routine walks the list of pool entries; every entry with a
379 * non-NULL page is unmapped and freed.
380 */
381void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
382{
383 int i;
384 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
385 if (rxq->pool[i].page != NULL) {
386 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
387 PAGE_SIZE << priv->hw_params.rx_page_order,
388 PCI_DMA_FROMDEVICE);
389 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
390 rxq->pool[i].page = NULL;
391 }
392 }
393
394 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
395 rxq->bd_dma);
396 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
397 rxq->rb_stts, rxq->rb_stts_dma);
398 rxq->bd = NULL;
399 rxq->rb_stts = NULL;
400}
401
402int iwl4965_rxq_stop(struct iwl_priv *priv)
403{
404
405 /* stop Rx DMA */
406 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
407 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
408 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
409
410 return 0;
411}
412
413int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
414{
415 int idx = 0;
416 int band_offset = 0;
417
418 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
419 if (rate_n_flags & RATE_MCS_HT_MSK) {
420 idx = (rate_n_flags & 0xff);
421 return idx;
422 /* Legacy rate format, search for match in table */
423 } else {
424 if (band == IEEE80211_BAND_5GHZ)
425 band_offset = IWL_FIRST_OFDM_RATE;
426 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
427 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
428 return idx - band_offset;
429 }
430
431 return -1;
432}
433
434static int iwl4965_calc_rssi(struct iwl_priv *priv,
435 struct iwl_rx_phy_res *rx_resp)
436{
437 /* data from PHY/DSP regarding signal strength, etc.,
438 * contents are always there, not configurable by host. */
439 struct iwl4965_rx_non_cfg_phy *ncphy =
440 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
441 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
442 >> IWL49_AGC_DB_POS;
443
444 u32 valid_antennae =
445 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
446 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
447 u8 max_rssi = 0;
448 u32 i;
449
450 /* Find max rssi among 3 possible receivers.
451 * These values are measured by the digital signal processor (DSP).
452 * They should stay fairly constant even as the signal strength varies,
453 * if the radio's automatic gain control (AGC) is working right.
454 * AGC value (see below) will provide the "interesting" info. */
455 for (i = 0; i < 3; i++)
456 if (valid_antennae & (1 << i))
457 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
458
459 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
460 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
461 max_rssi, agc);
462
463 /* dBm = max_rssi dB - agc dB - constant.
464 * Higher AGC (higher radio gain) means lower signal. */
465 return max_rssi - agc - IWL4965_RSSI_OFFSET;
466}
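/*
 * Worked example (hypothetical numbers, added for clarity): with max_rssi = 80
 * and agc = 88, the value returned above would be
 * 80 - 88 - IWL4965_RSSI_OFFSET(44) = -52 dBm.
 */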
467
468
469static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
470{
471 u32 decrypt_out = 0;
472
473 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
474 RX_RES_STATUS_STATION_FOUND)
475 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
476 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
477
478 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
479
480 /* packet was not encrypted */
481 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
482 RX_RES_STATUS_SEC_TYPE_NONE)
483 return decrypt_out;
484
485 /* packet was encrypted with unknown alg */
486 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
487 RX_RES_STATUS_SEC_TYPE_ERR)
488 return decrypt_out;
489
490 /* decryption was not done in HW */
491 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
492 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
493 return decrypt_out;
494
495 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
496
497 case RX_RES_STATUS_SEC_TYPE_CCMP:
498 /* alg is CCM: check MIC only */
499 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
500 /* Bad MIC */
501 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
502 else
503 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
504
505 break;
506
507 case RX_RES_STATUS_SEC_TYPE_TKIP:
508 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
509 /* Bad TTAK */
510 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
511 break;
512 }
513 /* fall through if TTAK OK */
514 default:
515 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
516 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
517 else
518 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
519 break;
520 }
521
522 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
523 decrypt_in, decrypt_out);
524
525 return decrypt_out;
526}
527
528static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
529 struct ieee80211_hdr *hdr,
530 u16 len,
531 u32 ampdu_status,
532 struct iwl_rx_mem_buffer *rxb,
533 struct ieee80211_rx_status *stats)
534{
535 struct sk_buff *skb;
536 __le16 fc = hdr->frame_control;
537
538 /* We only process data packets if the interface is open */
539 if (unlikely(!priv->is_open)) {
540 IWL_DEBUG_DROP_LIMIT(priv,
541 "Dropping packet while interface is not open.\n");
542 return;
543 }
544
545 /* In case of HW accelerated crypto and bad decryption, drop */
546 if (!priv->cfg->mod_params->sw_crypto &&
547 iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
548 return;
549
550 skb = dev_alloc_skb(128);
551 if (!skb) {
552 IWL_ERR(priv, "dev_alloc_skb failed\n");
553 return;
554 }
555
556 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
557
558 iwl_legacy_update_stats(priv, false, fc, len);
559 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
560
561 ieee80211_rx(priv->hw, skb);
562 priv->alloc_rxb_page--;
563 rxb->page = NULL;
564}
565
566/* Called for REPLY_RX (legacy ABG frames), or
567 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
568void iwl4965_rx_reply_rx(struct iwl_priv *priv,
569 struct iwl_rx_mem_buffer *rxb)
570{
571 struct ieee80211_hdr *header;
572 struct ieee80211_rx_status rx_status;
573 struct iwl_rx_packet *pkt = rxb_addr(rxb);
574 struct iwl_rx_phy_res *phy_res;
575 __le32 rx_pkt_status;
576 struct iwl_rx_mpdu_res_start *amsdu;
577 u32 len;
578 u32 ampdu_status;
579 u32 rate_n_flags;
580
581 /**
582 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
583 * REPLY_RX: physical layer info is in this buffer
584 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
585 * command and cached in priv->last_phy_res
586 *
587 * Here we set up local variables depending on which command is
588 * received.
589 */
590 if (pkt->hdr.cmd == REPLY_RX) {
591 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
592 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
593 + phy_res->cfg_phy_cnt);
594
595 len = le16_to_cpu(phy_res->byte_count);
596 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
597 phy_res->cfg_phy_cnt + len);
598 ampdu_status = le32_to_cpu(rx_pkt_status);
599 } else {
600 if (!priv->_4965.last_phy_res_valid) {
601 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
602 return;
603 }
604 phy_res = &priv->_4965.last_phy_res;
605 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
606 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
607 len = le16_to_cpu(amsdu->byte_count);
608 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
609 ampdu_status = iwl4965_translate_rx_status(priv,
610 le32_to_cpu(rx_pkt_status));
611 }
612
613 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
 614			IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
615 phy_res->cfg_phy_cnt);
616 return;
617 }
618
619 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
620 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
621 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
622 le32_to_cpu(rx_pkt_status));
623 return;
624 }
625
626 /* This will be used in several places later */
627 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
628
629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
 631	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
 632				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
 633	rx_status.freq =
 634		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
 635						rx_status.band);
636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0;
639
 640	/* TSF isn't reliable. To keep the user experience smooth, this
 641	 * workaround does not propagate it to mac80211. */
642 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
643
644 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
645
646 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
647 rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
648
649 iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
650 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
651 rx_status.signal, (unsigned long long)rx_status.mactime);
652
653 /*
654 * "antenna number"
655 *
656 * It seems that the antenna field in the phy flags value
657 * is actually a bit field. This is undefined by radiotap,
658 * it wants an actual antenna number but I always get "7"
659 * for most legacy frames I receive indicating that the
660 * same frame was received on all three RX chains.
661 *
662 * I think this field should be removed in favor of a
663 * new 802.11n radiotap field "RX chains" that is defined
664 * as a bitmask.
665 */
666 rx_status.antenna =
667 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
668 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
669
670 /* set the preamble flag if appropriate */
671 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
672 rx_status.flag |= RX_FLAG_SHORTPRE;
673
674 /* Set up the HT phy flags */
675 if (rate_n_flags & RATE_MCS_HT_MSK)
676 rx_status.flag |= RX_FLAG_HT;
677 if (rate_n_flags & RATE_MCS_HT40_MSK)
678 rx_status.flag |= RX_FLAG_40MHZ;
679 if (rate_n_flags & RATE_MCS_SGI_MSK)
680 rx_status.flag |= RX_FLAG_SHORT_GI;
681
682 iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
683 rxb, &rx_status);
684}
685
686/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
687 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
688void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
689 struct iwl_rx_mem_buffer *rxb)
690{
691 struct iwl_rx_packet *pkt = rxb_addr(rxb);
692 priv->_4965.last_phy_res_valid = true;
693 memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
694 sizeof(struct iwl_rx_phy_res));
695}
696
697static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv,
698 struct ieee80211_vif *vif,
699 enum ieee80211_band band,
700 struct iwl_scan_channel *scan_ch)
701{
702 const struct ieee80211_supported_band *sband;
703 u16 passive_dwell = 0;
704 u16 active_dwell = 0;
705 int added = 0;
706 u16 channel = 0;
707
708 sband = iwl_get_hw_mode(priv, band);
709 if (!sband) {
710 IWL_ERR(priv, "invalid band\n");
711 return added;
712 }
713
714 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
715 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
716
717 if (passive_dwell <= active_dwell)
718 passive_dwell = active_dwell + 1;
719
720 channel = iwl_legacy_get_single_channel_number(priv, band);
721 if (channel) {
722 scan_ch->channel = cpu_to_le16(channel);
723 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
724 scan_ch->active_dwell = cpu_to_le16(active_dwell);
725 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
726 /* Set txpower levels to defaults */
727 scan_ch->dsp_atten = 110;
728 if (band == IEEE80211_BAND_5GHZ)
729 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
730 else
731 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
732 added++;
733 } else
734 IWL_ERR(priv, "no valid channel found\n");
735 return added;
736}
737
738static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
739 struct ieee80211_vif *vif,
740 enum ieee80211_band band,
741 u8 is_active, u8 n_probes,
742 struct iwl_scan_channel *scan_ch)
743{
744 struct ieee80211_channel *chan;
745 const struct ieee80211_supported_band *sband;
746 const struct iwl_channel_info *ch_info;
747 u16 passive_dwell = 0;
748 u16 active_dwell = 0;
749 int added, i;
750 u16 channel;
751
752 sband = iwl_get_hw_mode(priv, band);
753 if (!sband)
754 return 0;
755
756 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
757 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
758
759 if (passive_dwell <= active_dwell)
760 passive_dwell = active_dwell + 1;
761
762 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
763 chan = priv->scan_request->channels[i];
764
765 if (chan->band != band)
766 continue;
767
768 channel = chan->hw_value;
769 scan_ch->channel = cpu_to_le16(channel);
770
771 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
772 if (!iwl_legacy_is_channel_valid(ch_info)) {
773 IWL_DEBUG_SCAN(priv,
774 "Channel %d is INVALID for this band.\n",
775 channel);
776 continue;
777 }
778
779 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
780 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
781 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
782 else
783 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
784
785 if (n_probes)
786 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
787
788 scan_ch->active_dwell = cpu_to_le16(active_dwell);
789 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
790
791 /* Set txpower levels to defaults */
792 scan_ch->dsp_atten = 110;
793
794 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
795 * power level:
796 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
797 */
798 if (band == IEEE80211_BAND_5GHZ)
799 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
800 else
801 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
802
803 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
804 channel, le32_to_cpu(scan_ch->type),
805 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
806 "ACTIVE" : "PASSIVE",
807 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
808 active_dwell : passive_dwell);
809
810 scan_ch++;
811 added++;
812 }
813
814 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
815 return added;
816}
817
818int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
819{
820 struct iwl_host_cmd cmd = {
821 .id = REPLY_SCAN_CMD,
822 .len = sizeof(struct iwl_scan_cmd),
823 .flags = CMD_SIZE_HUGE,
824 };
825 struct iwl_scan_cmd *scan;
826 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
827 u32 rate_flags = 0;
828 u16 cmd_len;
829 u16 rx_chain = 0;
830 enum ieee80211_band band;
831 u8 n_probes = 0;
832 u8 rx_ant = priv->hw_params.valid_rx_ant;
833 u8 rate;
834 bool is_active = false;
835 int chan_mod;
836 u8 active_chains;
837 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
838 int ret;
839
840 lockdep_assert_held(&priv->mutex);
841
842 if (vif)
843 ctx = iwl_legacy_rxon_ctx_from_vif(vif);
844
845 if (!priv->scan_cmd) {
846 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
847 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
848 if (!priv->scan_cmd) {
849 IWL_DEBUG_SCAN(priv,
850 "fail to allocate memory for scan\n");
851 return -ENOMEM;
852 }
853 }
854 scan = priv->scan_cmd;
855 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
856
857 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
858 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
859
860 if (iwl_legacy_is_any_associated(priv)) {
861 u16 interval = 0;
862 u32 extra;
863 u32 suspend_time = 100;
864 u32 scan_suspend_time = 100;
865
866 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
867 if (priv->is_internal_short_scan)
868 interval = 0;
869 else
870 interval = vif->bss_conf.beacon_int;
871
872 scan->suspend_time = 0;
873 scan->max_out_time = cpu_to_le32(200 * 1024);
874 if (!interval)
875 interval = suspend_time;
876
877 extra = (suspend_time / interval) << 22;
878 scan_suspend_time = (extra |
879 ((suspend_time % interval) * 1024));
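		/*
		 * Note added for clarity: this appears to pack the number of
		 * whole beacon intervals into the upper bits (<< 22) and the
		 * remaining time, scaled by 1024 (TU -> usec), into the lower
		 * bits of the suspend_time field.
		 */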
880 scan->suspend_time = cpu_to_le32(scan_suspend_time);
881 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
882 scan_suspend_time, interval);
883 }
884
885 if (priv->is_internal_short_scan) {
886 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
887 } else if (priv->scan_request->n_ssids) {
888 int i, p = 0;
889 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
890 for (i = 0; i < priv->scan_request->n_ssids; i++) {
891 /* always does wildcard anyway */
892 if (!priv->scan_request->ssids[i].ssid_len)
893 continue;
894 scan->direct_scan[p].id = WLAN_EID_SSID;
895 scan->direct_scan[p].len =
896 priv->scan_request->ssids[i].ssid_len;
897 memcpy(scan->direct_scan[p].ssid,
898 priv->scan_request->ssids[i].ssid,
899 priv->scan_request->ssids[i].ssid_len);
900 n_probes++;
901 p++;
902 }
903 is_active = true;
904 } else
905 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
906
907 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
908 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
909 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
910
911 switch (priv->scan_band) {
912 case IEEE80211_BAND_2GHZ:
913 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
914 chan_mod = le32_to_cpu(
915 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
916 RXON_FLG_CHANNEL_MODE_MSK)
917 >> RXON_FLG_CHANNEL_MODE_POS;
918 if (chan_mod == CHANNEL_MODE_PURE_40) {
919 rate = IWL_RATE_6M_PLCP;
920 } else {
921 rate = IWL_RATE_1M_PLCP;
922 rate_flags = RATE_MCS_CCK_MSK;
923 }
924 break;
925 case IEEE80211_BAND_5GHZ:
926 rate = IWL_RATE_6M_PLCP;
927 break;
928 default:
929 IWL_WARN(priv, "Invalid scan band\n");
930 return -EIO;
931 }
932
933 /*
934 * If active scanning is requested but a certain channel is
935 * marked passive, we can do active scanning if we detect
936 * transmissions.
937 *
938 * There is an issue with some firmware versions that triggers
939 * a sysassert on a "good CRC threshold" of zero (== disabled),
940 * on a radar channel even though this means that we should NOT
941 * send probes.
942 *
943 * The "good CRC threshold" is the number of frames that we
944 * need to receive during our dwell time on a channel before
945 * sending out probes -- setting this to a huge value will
946 * mean we never reach it, but at the same time work around
947 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
948 * here instead of IWL_GOOD_CRC_TH_DISABLED.
949 */
950 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
951 IWL_GOOD_CRC_TH_NEVER;
952
953 band = priv->scan_band;
954
955 if (priv->cfg->scan_rx_antennas[band])
956 rx_ant = priv->cfg->scan_rx_antennas[band];
957
958 if (priv->cfg->scan_tx_antennas[band])
959 scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
960
961 priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
962 priv->scan_tx_ant[band],
963 scan_tx_antennas);
964 rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
965 scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
966
967 /* In power save mode use one chain, otherwise use all chains */
968 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
969 /* rx_ant has been set to all valid chains previously */
970 active_chains = rx_ant &
971 ((u8)(priv->chain_noise_data.active_chains));
972 if (!active_chains)
973 active_chains = rx_ant;
974
975 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
976 priv->chain_noise_data.active_chains);
977
978 rx_ant = iwl4965_first_antenna(active_chains);
979 }
980
981 /* MIMO is not used here, but value is required */
982 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
983 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
984 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
985 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
986 scan->rx_chain = cpu_to_le16(rx_chain);
987 if (!priv->is_internal_short_scan) {
988 cmd_len = iwl_legacy_fill_probe_req(priv,
989 (struct ieee80211_mgmt *)scan->data,
990 vif->addr,
991 priv->scan_request->ie,
992 priv->scan_request->ie_len,
993 IWL_MAX_SCAN_SIZE - sizeof(*scan));
994 } else {
995 /* use bcast addr, will not be transmitted but must be valid */
996 cmd_len = iwl_legacy_fill_probe_req(priv,
997 (struct ieee80211_mgmt *)scan->data,
998 iwlegacy_bcast_addr, NULL, 0,
999 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1000
1001 }
1002 scan->tx_cmd.len = cpu_to_le16(cmd_len);
1003
1004 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
1005 RXON_FILTER_BCON_AWARE_MSK);
1006
1007 if (priv->is_internal_short_scan) {
1008 scan->channel_count =
1009 iwl4965_get_single_channel_for_scan(priv, vif, band,
1010 (void *)&scan->data[le16_to_cpu(
1011 scan->tx_cmd.len)]);
1012 } else {
1013 scan->channel_count =
1014 iwl4965_get_channels_for_scan(priv, vif, band,
1015 is_active, n_probes,
1016 (void *)&scan->data[le16_to_cpu(
1017 scan->tx_cmd.len)]);
1018 }
1019 if (scan->channel_count == 0) {
1020 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1021 return -EIO;
1022 }
1023
1024 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
1025 scan->channel_count * sizeof(struct iwl_scan_channel);
1026 cmd.data = scan;
1027 scan->len = cpu_to_le16(cmd.len);
1028
1029 set_bit(STATUS_SCAN_HW, &priv->status);
1030
1031 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
1032 if (ret)
1033 clear_bit(STATUS_SCAN_HW, &priv->status);
1034
1035 return ret;
1036}
1037
1038int iwl4965_manage_ibss_station(struct iwl_priv *priv,
1039 struct ieee80211_vif *vif, bool add)
1040{
1041 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1042
1043 if (add)
1044 return iwl4965_add_bssid_station(priv, vif_priv->ctx,
1045 vif->bss_conf.bssid,
1046 &vif_priv->ibss_bssid_sta_id);
1047 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1048 vif->bss_conf.bssid);
1049}
1050
1051void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
1052 int sta_id, int tid, int freed)
1053{
1054 lockdep_assert_held(&priv->sta_lock);
1055
1056 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1057 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1058 else {
1059 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1060 priv->stations[sta_id].tid[tid].tfds_in_queue,
1061 freed);
1062 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1063 }
1064}
1065
1066#define IWL_TX_QUEUE_MSK 0xfffff
1067
1068static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
1069{
1070 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1071 priv->current_ht_config.single_chain_sufficient;
1072}
1073
1074#define IWL_NUM_RX_CHAINS_MULTIPLE 3
1075#define IWL_NUM_RX_CHAINS_SINGLE 2
1076#define IWL_NUM_IDLE_CHAINS_DUAL 2
1077#define IWL_NUM_IDLE_CHAINS_SINGLE 1
1078
1079/*
1080 * Determine how many receiver/antenna chains to use.
1081 *
1082 * More provides better reception via diversity. Fewer saves power
1083 * at the expense of throughput, but only when not in powersave to
1084 * start with.
1085 *
1086 * MIMO (dual stream) requires at least 2, but works better with 3.
1087 * This does not determine *which* chains to use, just how many.
1088 */
1089static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
1090{
1091 /* # of Rx chains to use when expecting MIMO. */
1092 if (iwl4965_is_single_rx_stream(priv))
1093 return IWL_NUM_RX_CHAINS_SINGLE;
1094 else
1095 return IWL_NUM_RX_CHAINS_MULTIPLE;
1096}
1097
1098/*
1099 * When we are in power-saving mode, unless the device supports spatial
1100 * multiplexing power save, use the active count for the rx chain count.
1101 */
1102static int
1103iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1104{
1105 /* # Rx chains when idling, depending on SMPS mode */
1106 switch (priv->current_ht_config.smps) {
1107 case IEEE80211_SMPS_STATIC:
1108 case IEEE80211_SMPS_DYNAMIC:
1109 return IWL_NUM_IDLE_CHAINS_SINGLE;
1110 case IEEE80211_SMPS_OFF:
1111 return active_cnt;
1112 default:
1113 WARN(1, "invalid SMPS mode %d",
1114 priv->current_ht_config.smps);
1115 return active_cnt;
1116 }
1117}
1118
1119/* up to 4 chains */
1120static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
1121{
1122 u8 res;
1123 res = (chain_bitmap & BIT(0)) >> 0;
1124 res += (chain_bitmap & BIT(1)) >> 1;
1125 res += (chain_bitmap & BIT(2)) >> 2;
1126 res += (chain_bitmap & BIT(3)) >> 3;
1127 return res;
1128}
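/*
 * Note added for clarity: for the low four bits this is equivalent to
 * hweight8(chain_bitmap & 0xf); the open-coded form just keeps the
 * "up to 4 chains" assumption explicit.
 */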
1129
1130/**
1131 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1132 *
1133 * Selects how many and which Rx receivers/antennas/chains to use.
 1134 * This should not be used for the scan command; it puts data in the wrong place.
1135 */
1136void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1137{
1138 bool is_single = iwl4965_is_single_rx_stream(priv);
1139 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
1140 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1141 u32 active_chains;
1142 u16 rx_chain;
1143
1144 /* Tell uCode which antennas are actually connected.
1145 * Before first association, we assume all antennas are connected.
1146 * Just after first association, iwl4965_chain_noise_calibration()
1147 * checks which antennas actually *are* connected. */
1148 if (priv->chain_noise_data.active_chains)
1149 active_chains = priv->chain_noise_data.active_chains;
1150 else
1151 active_chains = priv->hw_params.valid_rx_ant;
1152
1153 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1154
1155 /* How many receivers should we use? */
1156 active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
1157 idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
1158
1159
1160 /* correct rx chain count according hw settings
1161 * and chain noise calibration
1162 */
1163 valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
1164 if (valid_rx_cnt < active_rx_cnt)
1165 active_rx_cnt = valid_rx_cnt;
1166
1167 if (valid_rx_cnt < idle_rx_cnt)
1168 idle_rx_cnt = valid_rx_cnt;
1169
1170 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1171 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1172
1173 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
1174
1175 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
1176 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1177 else
1178 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1179
1180 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
1181 ctx->staging.rx_chain,
1182 active_rx_cnt, idle_rx_cnt);
1183
1184 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1185 active_rx_cnt < idle_rx_cnt);
1186}
1187
1188u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1189{
1190 int i;
1191 u8 ind = ant;
1192
1193 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1194 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1195 if (valid & BIT(ind))
1196 return ind;
1197 }
1198 return ant;
1199}
1200
1201static const char *iwl4965_get_fh_string(int cmd)
1202{
1203 switch (cmd) {
1204 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1205 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1206 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
1207 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1208 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1209 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1210 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1211 IWL_CMD(FH_TSSR_TX_STATUS_REG);
1212 IWL_CMD(FH_TSSR_TX_ERROR_REG);
1213 default:
1214 return "UNKNOWN";
1215 }
1216}
1217
1218int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
1219{
1220 int i;
1221#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1222 int pos = 0;
1223 size_t bufsz = 0;
1224#endif
1225 static const u32 fh_tbl[] = {
1226 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1227 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1228 FH_RSCSR_CHNL0_WPTR,
1229 FH_MEM_RCSR_CHNL0_CONFIG_REG,
1230 FH_MEM_RSSR_SHARED_CTRL_REG,
1231 FH_MEM_RSSR_RX_STATUS_REG,
1232 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1233 FH_TSSR_TX_STATUS_REG,
1234 FH_TSSR_TX_ERROR_REG
1235 };
1236#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1237 if (display) {
1238 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1239 *buf = kmalloc(bufsz, GFP_KERNEL);
1240 if (!*buf)
1241 return -ENOMEM;
1242 pos += scnprintf(*buf + pos, bufsz - pos,
1243 "FH register values:\n");
1244 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1245 pos += scnprintf(*buf + pos, bufsz - pos,
1246 " %34s: 0X%08x\n",
1247 iwl4965_get_fh_string(fh_tbl[i]),
1248 iwl_legacy_read_direct32(priv, fh_tbl[i]));
1249 }
1250 return pos;
1251 }
1252#endif
1253 IWL_ERR(priv, "FH register values:\n");
1254 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1255 IWL_ERR(priv, " %34s: 0X%08x\n",
1256 iwl4965_get_fh_string(fh_tbl[i]),
1257 iwl_legacy_read_direct32(priv, fh_tbl[i]));
1258 }
1259 return 0;
1260}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
new file mode 100644
index 000000000000..31ac672b64e1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -0,0 +1,2870 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <linux/wireless.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "iwl-dev.h"
40#include "iwl-sta.h"
41#include "iwl-core.h"
42#include "iwl-4965.h"
43
44#define IWL4965_RS_NAME "iwl-4965-rs"
45
46#define NUM_TRY_BEFORE_ANT_TOGGLE 1
47#define IWL_NUMBER_TRY 1
48#define IWL_HT_NUMBER_TRY 3
49
50#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
51#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
52#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
53
54/* max allowed rate miss before sync LQ cmd */
55#define IWL_MISSED_RATE_MAX 15
56/* max time to accumulate history: 3 seconds */
57#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
58
59static u8 rs_ht_to_legacy[] = {
60 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
61 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
62 IWL_RATE_6M_INDEX,
63 IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
64 IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
65 IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
66 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
67};
68
69static const u8 ant_toggle_lookup[] = {
70 /*ANT_NONE -> */ ANT_NONE,
71 /*ANT_A -> */ ANT_B,
72 /*ANT_B -> */ ANT_C,
73 /*ANT_AB -> */ ANT_BC,
74 /*ANT_C -> */ ANT_A,
75 /*ANT_AC -> */ ANT_AB,
76 /*ANT_BC -> */ ANT_AC,
77 /*ANT_ABC -> */ ANT_ABC,
78};
79
80#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
81 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
82 IWL_RATE_SISO_##s##M_PLCP, \
83 IWL_RATE_MIMO2_##s##M_PLCP,\
84 IWL_RATE_##r##M_IEEE, \
85 IWL_RATE_##ip##M_INDEX, \
86 IWL_RATE_##in##M_INDEX, \
87 IWL_RATE_##rp##M_INDEX, \
88 IWL_RATE_##rn##M_INDEX, \
89 IWL_RATE_##pp##M_INDEX, \
90 IWL_RATE_##np##M_INDEX }
91
92/*
93 * Parameter order:
94 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
95 *
96 * If there isn't a valid next or previous rate then INV is used which
97 * maps to IWL_RATE_INVALID
98 *
99 */
100const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
101 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
102 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
103 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
104 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
105 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
106 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
107 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
108 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
109 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
110 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
111 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
112 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
113 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
114};
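/*
 * Example added for clarity: IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18)
 * fills the IWL_RATE_12M_INDEX slot with the 12M legacy, SISO and MIMO2 PLCP
 * codes and the IEEE rate value, then records 11M/18M as the previous/next
 * rate indexes used when stepping down or up (with and without antenna
 * toggling).
 */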
115
116static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
117{
118 int idx = 0;
119
120 /* HT rate format */
121 if (rate_n_flags & RATE_MCS_HT_MSK) {
122 idx = (rate_n_flags & 0xff);
123
124 if (idx >= IWL_RATE_MIMO2_6M_PLCP)
125 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
126
127 idx += IWL_FIRST_OFDM_RATE;
128 /* skip 9M not supported in ht*/
129 if (idx >= IWL_RATE_9M_INDEX)
130 idx += 1;
131 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
132 return idx;
133
134 /* legacy rate format, search for match in table */
135 } else {
136 for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
137 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
138 return idx;
139 }
140
141 return -1;
142}
143
144static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
145 struct sk_buff *skb,
146 struct ieee80211_sta *sta,
147 struct iwl_lq_sta *lq_sta);
148static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
149 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
150static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
151 bool force_search);
152
153#ifdef CONFIG_MAC80211_DEBUGFS
154static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
155 u32 *rate_n_flags, int index);
156#else
157static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
158 u32 *rate_n_flags, int index)
159{}
160#endif
161
162/**
163 * The following tables contain the expected throughput metrics for all rates
164 *
165 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
166 *
167 * where invalid entries are zeros.
168 *
 169 * CCK rates are only valid in the legacy table and will only be used in
 170 * the G (2.4 GHz) band.
171 */
172
173static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
174 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
175};
176
177static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
178 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
179 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
180 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
181 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
182};
183
184static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
185 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
186 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
187 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
188 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
189};
190
191static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
192 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
193 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
194 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
195 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
196};
197
198static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
199 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
200 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
201 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
202 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
203};
204
205/* mbps, mcs */
206static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
207 { "1", "BPSK DSSS"},
208 { "2", "QPSK DSSS"},
209 {"5.5", "BPSK CCK"},
210 { "11", "QPSK CCK"},
211 { "6", "BPSK 1/2"},
212 { "9", "BPSK 1/2"},
213 { "12", "QPSK 1/2"},
214 { "18", "QPSK 3/4"},
215 { "24", "16QAM 1/2"},
216 { "36", "16QAM 3/4"},
217 { "48", "64QAM 2/3"},
218 { "54", "64QAM 3/4"},
219 { "60", "64QAM 5/6"},
220};
221
222#define MCS_INDEX_PER_STREAM (8)
223
224static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
225{
226 return (u8)(rate_n_flags & 0xFF);
227}
228
229static void
230iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
231{
232 window->data = 0;
233 window->success_counter = 0;
234 window->success_ratio = IWL_INVALID_VALUE;
235 window->counter = 0;
236 window->average_tpt = IWL_INVALID_VALUE;
237 window->stamp = 0;
238}
239
240static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
241{
242 return (ant_type & valid_antenna) == ant_type;
243}
244
245/*
246 * Remove old data from the statistics; all data that is older than
247 * TID_MAX_TIME_DIFF is deleted.
248 */
249static void
250iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
251{
252 /* The oldest age we want to keep */
253 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
254
255 while (tl->queue_count &&
256 (tl->time_stamp < oldest_time)) {
257 tl->total -= tl->packet_count[tl->head];
258 tl->packet_count[tl->head] = 0;
259 tl->time_stamp += TID_QUEUE_CELL_SPACING;
260 tl->queue_count--;
261 tl->head++;
262 if (tl->head >= TID_QUEUE_MAX_SIZE)
263 tl->head = 0;
264 }
265}
266
267/*
268 * Increment the traffic load value for the given tid, and also remove
269 * any old values that have aged past the maximum time period.
270 */
271static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
272 struct ieee80211_hdr *hdr)
273{
274 u32 curr_time = jiffies_to_msecs(jiffies);
275 u32 time_diff;
276 s32 index;
277 struct iwl_traffic_load *tl = NULL;
278 u8 tid;
279
280 if (ieee80211_is_data_qos(hdr->frame_control)) {
281 u8 *qc = ieee80211_get_qos_ctl(hdr);
282 tid = qc[0] & 0xf;
283 } else
284 return MAX_TID_COUNT;
285
286 if (unlikely(tid >= TID_MAX_LOAD_COUNT))
287 return MAX_TID_COUNT;
288
289 tl = &lq_data->load[tid];
290
291 curr_time -= curr_time % TID_ROUND_VALUE;
292
293 /* Happens only for the first packet. Initialize the data */
294 if (!(tl->queue_count)) {
295 tl->total = 1;
296 tl->time_stamp = curr_time;
297 tl->queue_count = 1;
298 tl->head = 0;
299 tl->packet_count[0] = 1;
300 return MAX_TID_COUNT;
301 }
302
303 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
304 index = time_diff / TID_QUEUE_CELL_SPACING;
305
306 /* The history is too long: remove data that is older than */
307 /* TID_MAX_TIME_DIFF */
308 if (index >= TID_QUEUE_MAX_SIZE)
309 iwl4965_rs_tl_rm_old_stats(tl, curr_time);
310
311 index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
312 tl->packet_count[index] = tl->packet_count[index] + 1;
313 tl->total = tl->total + 1;
314
315 if ((index + 1) > tl->queue_count)
316 tl->queue_count = index + 1;
317
318 return tid;
319}
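/*
 * Illustrative sketch (not part of the driver): how the per-TID traffic
 * load above maps a timestamp onto its ring buffer.  Each cell covers
 * TID_QUEUE_CELL_SPACING milliseconds of traffic; the cell is addressed
 * relative to tl->head and wraps at TID_QUEUE_MAX_SIZE (the real code also
 * guards the subtraction with TIME_WRAP_AROUND and flushes old cells first).
 * The constants and helper below are hypothetical stand-ins, not the
 * driver's definitions.
 */
#if 0
#define EXAMPLE_CELL_SPACING_MS		50	/* hypothetical ms per cell     */
#define EXAMPLE_QUEUE_MAX_SIZE		20	/* hypothetical number of cells */

static int example_tid_bucket(unsigned int head, unsigned int oldest_time_ms,
			      unsigned int curr_time_ms)
{
	/* elapsed time since the oldest cell, expressed in whole cells */
	int idx = (curr_time_ms - oldest_time_ms) / EXAMPLE_CELL_SPACING_MS;

	/* wrap the logical index into the physical ring buffer */
	return (head + idx) % EXAMPLE_QUEUE_MAX_SIZE;
}
#endif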
320
321/*
322 * Get the traffic load value for the given tid.
323 */
324static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
325{
326 u32 curr_time = jiffies_to_msecs(jiffies);
327 u32 time_diff;
328 s32 index;
329 struct iwl_traffic_load *tl = NULL;
330
331 if (tid >= TID_MAX_LOAD_COUNT)
332 return 0;
333
334 tl = &(lq_data->load[tid]);
335
336 curr_time -= curr_time % TID_ROUND_VALUE;
337
338 if (!(tl->queue_count))
339 return 0;
340
341 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
342 index = time_diff / TID_QUEUE_CELL_SPACING;
343
344 /* The history is too long: remove data that is older than */
345 /* TID_MAX_TIME_DIFF */
346 if (index >= TID_QUEUE_MAX_SIZE)
347 iwl4965_rs_tl_rm_old_stats(tl, curr_time);
348
349 return tl->total;
350}
351
352static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
353 struct iwl_lq_sta *lq_data, u8 tid,
354 struct ieee80211_sta *sta)
355{
356 int ret = -EAGAIN;
357 u32 load;
358
359 load = iwl4965_rs_tl_get_load(lq_data, tid);
360
361 if (load > IWL_AGG_LOAD_THRESHOLD) {
362 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
363 sta->addr, tid);
364 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
365 if (ret == -EAGAIN) {
366 /*
367 * The driver and mac80211 are out of sync;
368 * this might be caused by reloading the firmware.
369 * Stop the Tx BA session here.
370 */
371 IWL_ERR(priv, "Failed to start Tx agg on tid: %d\n",
372 tid);
373 ieee80211_stop_tx_ba_session(sta, tid);
374 }
375 } else {
376 IWL_ERR(priv, "Aggregation not enabled for tid %d "
377 "because load = %u\n", tid, load);
378 }
379 return ret;
380}
381
382static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
383 struct iwl_lq_sta *lq_data,
384 struct ieee80211_sta *sta)
385{
386 if (tid < TID_MAX_LOAD_COUNT)
387 iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
388 else
389 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
390 tid, TID_MAX_LOAD_COUNT);
391}
392
393static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
394{
395 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
397 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
398}
399
400/*
401 * Get the expected throughput for the given rate from an iwl_scale_tbl_info,
402 * with a NULL pointer check on the expected throughput table.
403 */
404static s32
405iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
406{
407 if (tbl->expected_tpt)
408 return tbl->expected_tpt[rs_index];
409 return 0;
410}
411
412/**
413 * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
414 *
415 * We keep a sliding window of the last 62 packets transmitted
416 * at this rate. window->data contains the bitmask of successful
417 * packets.
418 */
419static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
420 int scale_index, int attempts, int successes)
421{
422 struct iwl_rate_scale_data *window = NULL;
423 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
424 s32 fail_count, tpt;
425
426 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
427 return -EINVAL;
428
429 /* Select window for current tx bit rate */
430 window = &(tbl->win[scale_index]);
431
432 /* Get expected throughput */
433 tpt = iwl4965_get_expected_tpt(tbl, scale_index);
434
435 /*
436 * Keep track of only the latest 62 tx frame attempts in this rate's
437 * history window; anything older isn't really relevant any more.
438 * If we have filled up the sliding window, drop the oldest attempt;
439 * if the oldest attempt (highest bit in bitmap) shows "success",
440 * subtract "1" from the success counter (this is the main reason
441 * we keep these bitmaps!).
442 */
443 while (attempts > 0) {
444 if (window->counter >= IWL_RATE_MAX_WINDOW) {
445
446 /* remove earliest */
447 window->counter = IWL_RATE_MAX_WINDOW - 1;
448
449 if (window->data & mask) {
450 window->data &= ~mask;
451 window->success_counter--;
452 }
453 }
454
455 /* Increment frames-attempted counter */
456 window->counter++;
457
458 /* Shift bitmap by one frame to throw away oldest history */
459 window->data <<= 1;
460
461 /* Mark the most recent #successes attempts as successful */
462 if (successes > 0) {
463 window->success_counter++;
464 window->data |= 0x1;
465 successes--;
466 }
467
468 attempts--;
469 }
470
471 /* Calculate current success ratio, avoid divide-by-0! */
472 if (window->counter > 0)
473 window->success_ratio = 128 * (100 * window->success_counter)
474 / window->counter;
475 else
476 window->success_ratio = IWL_INVALID_VALUE;
477
478 fail_count = window->counter - window->success_counter;
479
480 /* Calculate average throughput, if we have enough history. */
481 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
482 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
483 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
484 else
485 window->average_tpt = IWL_INVALID_VALUE;
486
487 /* Tag this window as having been updated */
488 window->stamp = jiffies;
489
490 return 0;
491}
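/*
 * Illustrative sketch (not part of the driver): the fixed-point arithmetic
 * used by iwl4965_rs_collect_tx_data() above.  success_ratio is a percentage
 * scaled by 128, so average_tpt ends up being roughly 100x the expected
 * throughput units -- which is why other parts of this file compare it
 * against "100 * tpt_tbl[rate]" or divide last_tpt by 100.  The numbers
 * below are hypothetical examples, not values taken from the driver.
 */
#if 0
static int example_window_math(void)
{
	int success_counter = 40;	/* hypothetical: 40 ACKed frames       */
	int counter = 62;		/* ... out of a full 62-attempt window */
	int expected_tpt = 124;		/* hypothetical expected throughput    */
	int success_ratio, average_tpt;

	/* 128 * percentage: 128 * (100 * 40) / 62 = 8258, i.e. ~64.5% * 128 */
	success_ratio = 128 * (100 * success_counter) / counter;

	/* rescale by 1/128 with rounding: (8258 * 124 + 64) / 128 = 8000 */
	average_tpt = (success_ratio * expected_tpt + 64) / 128;

	return average_tpt;	/* ~100 * (64.5% of 124) */
}
#endif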
492
493/*
494 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
495 */
496static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
497 struct iwl_scale_tbl_info *tbl,
498 int index, u8 use_green)
499{
500 u32 rate_n_flags = 0;
501
502 if (is_legacy(tbl->lq_type)) {
503 rate_n_flags = iwlegacy_rates[index].plcp;
504 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
505 rate_n_flags |= RATE_MCS_CCK_MSK;
506
507 } else if (is_Ht(tbl->lq_type)) {
508 if (index > IWL_LAST_OFDM_RATE) {
509 IWL_ERR(priv, "Invalid HT rate index %d\n", index);
510 index = IWL_LAST_OFDM_RATE;
511 }
512 rate_n_flags = RATE_MCS_HT_MSK;
513
514 if (is_siso(tbl->lq_type))
515 rate_n_flags |= iwlegacy_rates[index].plcp_siso;
516 else
517 rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
518 } else {
519 IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
520 }
521
522 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
523 RATE_MCS_ANT_ABC_MSK);
524
525 if (is_Ht(tbl->lq_type)) {
526 if (tbl->is_ht40) {
527 if (tbl->is_dup)
528 rate_n_flags |= RATE_MCS_DUP_MSK;
529 else
530 rate_n_flags |= RATE_MCS_HT40_MSK;
531 }
532 if (tbl->is_SGI)
533 rate_n_flags |= RATE_MCS_SGI_MSK;
534
535 if (use_green) {
536 rate_n_flags |= RATE_MCS_GF_MSK;
537 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
538 rate_n_flags &= ~RATE_MCS_SGI_MSK;
539 IWL_ERR(priv, "GF was set with SGI:SISO\n");
540 }
541 }
542 }
543 return rate_n_flags;
544}
545
546/*
547 * Interpret uCode API's rate_n_flags format,
548 * fill "search" or "active" tx mode table.
549 */
550static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
551 enum ieee80211_band band,
552 struct iwl_scale_tbl_info *tbl,
553 int *rate_idx)
554{
555 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
556 u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
557 u8 mcs;
558
559 memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
560 *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
561
562 if (*rate_idx == IWL_RATE_INVALID) {
563 *rate_idx = -1;
564 return -EINVAL;
565 }
566 tbl->is_SGI = 0; /* default legacy setup */
567 tbl->is_ht40 = 0;
568 tbl->is_dup = 0;
569 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
570 tbl->lq_type = LQ_NONE;
571 tbl->max_search = IWL_MAX_SEARCH;
572
573 /* legacy rate format */
574 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
575 if (iwl4965_num_of_ant == 1) {
576 if (band == IEEE80211_BAND_5GHZ)
577 tbl->lq_type = LQ_A;
578 else
579 tbl->lq_type = LQ_G;
580 }
581 /* HT rate format */
582 } else {
583 if (rate_n_flags & RATE_MCS_SGI_MSK)
584 tbl->is_SGI = 1;
585
586 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
587 (rate_n_flags & RATE_MCS_DUP_MSK))
588 tbl->is_ht40 = 1;
589
590 if (rate_n_flags & RATE_MCS_DUP_MSK)
591 tbl->is_dup = 1;
592
593 mcs = iwl4965_rs_extract_rate(rate_n_flags);
594
595 /* SISO */
596 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
597 if (iwl4965_num_of_ant == 1)
598 tbl->lq_type = LQ_SISO; /*else NONE*/
599 /* MIMO2 */
600 } else {
601 if (iwl4965_num_of_ant == 2)
602 tbl->lq_type = LQ_MIMO2;
603 }
604 }
605 return 0;
606}
607
608/* Switch to another antenna/antennas and return 1; */
609/* if no other valid antenna is found, return 0. */
610static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
611 struct iwl_scale_tbl_info *tbl)
612{
613 u8 new_ant_type;
614
615 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
616 return 0;
617
618 if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
619 return 0;
620
621 new_ant_type = ant_toggle_lookup[tbl->ant_type];
622
623 while ((new_ant_type != tbl->ant_type) &&
624 !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
625 new_ant_type = ant_toggle_lookup[new_ant_type];
626
627 if (new_ant_type == tbl->ant_type)
628 return 0;
629
630 tbl->ant_type = new_ant_type;
631 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
632 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
633 return 1;
634}
635
636/**
637 * Green-field mode is valid if the station supports it and
638 * there are no non-GF stations present in the BSS.
639 */
640static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
641{
642 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
643 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
644
645 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
646 !(ctx->ht.non_gf_sta_present);
647}
648
649/**
650 * iwl4965_rs_get_supported_rates - get the available rates
651 *
652 * For management or broadcast frames, only the basic
653 * available rates are returned.
654 *
655 */
656static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
657 struct ieee80211_hdr *hdr,
658 enum iwl_table_type rate_type)
659{
660 if (is_legacy(rate_type)) {
661 return lq_sta->active_legacy_rate;
662 } else {
663 if (is_siso(rate_type))
664 return lq_sta->active_siso_rate;
665 else
666 return lq_sta->active_mimo2_rate;
667 }
668}
669
670static u16
671iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
672 int rate_type)
673{
674 u8 high = IWL_RATE_INVALID;
675 u8 low = IWL_RATE_INVALID;
676
677 /* For 802.11a or HT, walk to the literally adjacent rates in
678 * the rate table */
679 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
680 int i;
681 u32 mask;
682
683 /* Find the previous rate that is in the rate mask */
684 i = index - 1;
685 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
686 if (rate_mask & mask) {
687 low = i;
688 break;
689 }
690 }
691
692 /* Find the next rate that is in the rate mask */
693 i = index + 1;
694 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
695 if (rate_mask & mask) {
696 high = i;
697 break;
698 }
699 }
700
701 return (high << 8) | low;
702 }
703
704 low = index;
705 while (low != IWL_RATE_INVALID) {
706 low = iwlegacy_rates[low].prev_rs;
707 if (low == IWL_RATE_INVALID)
708 break;
709 if (rate_mask & (1 << low))
710 break;
711 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
712 }
713
714 high = index;
715 while (high != IWL_RATE_INVALID) {
716 high = iwlegacy_rates[high].next_rs;
717 if (high == IWL_RATE_INVALID)
718 break;
719 if (rate_mask & (1 << high))
720 break;
721 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
722 }
723
724 return (high << 8) | low;
725}
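/*
 * Illustrative sketch (not part of the driver): iwl4965_rs_get_adjacent_rate()
 * above packs two 8-bit rate indices into a single u16 -- the next-higher
 * rate in the upper byte, the next-lower rate in the lower byte -- and its
 * callers split the result apart as shown here.
 */
#if 0
static void example_unpack_high_low(u16 high_low)
{
	u8 low  = high_low & 0xff;		/* next-lower rate index  */
	u8 high = (high_low >> 8) & 0xff;	/* next-higher rate index */

	(void)low;
	(void)high;
}
#endif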
726
727static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
728 struct iwl_scale_tbl_info *tbl,
729 u8 scale_index, u8 ht_possible)
730{
731 s32 low;
732 u16 rate_mask;
733 u16 high_low;
734 u8 switch_to_legacy = 0;
735 u8 is_green = lq_sta->is_green;
736 struct iwl_priv *priv = lq_sta->drv;
737
738 /* Check if we need to switch from HT to legacy rates;
739 * the assumption is that the mandatory rates (1 Mbps or 6 Mbps)
740 * are always supported (required by the spec). */
741 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
742 switch_to_legacy = 1;
743 scale_index = rs_ht_to_legacy[scale_index];
744 if (lq_sta->band == IEEE80211_BAND_5GHZ)
745 tbl->lq_type = LQ_A;
746 else
747 tbl->lq_type = LQ_G;
748
749 if (iwl4965_num_of_ant(tbl->ant_type) > 1)
750 tbl->ant_type =
751 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
752
753 tbl->is_ht40 = 0;
754 tbl->is_SGI = 0;
755 tbl->max_search = IWL_MAX_SEARCH;
756 }
757
758 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
759
760 /* Mask with station rate restriction */
761 if (is_legacy(tbl->lq_type)) {
762 /* supp_rates has no CCK bits in A mode */
763 if (lq_sta->band == IEEE80211_BAND_5GHZ)
764 rate_mask = (u16)(rate_mask &
765 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
766 else
767 rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
768 }
769
770 /* If we switched from HT to legacy, check current rate */
771 if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
772 low = scale_index;
773 goto out;
774 }
775
776 high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
777 scale_index, rate_mask,
778 tbl->lq_type);
779 low = high_low & 0xff;
780
781 if (low == IWL_RATE_INVALID)
782 low = scale_index;
783
784out:
785 return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
786}
787
788/*
789 * Simple function to compare two rate scale table types
790 */
791static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
792 struct iwl_scale_tbl_info *b)
793{
794 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
795 (a->is_SGI == b->is_SGI);
796}
797
798/*
799 * mac80211 sends us Tx status
800 */
801static void
802iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
803 struct ieee80211_sta *sta, void *priv_sta,
804 struct sk_buff *skb)
805{
806 int legacy_success;
807 int retries;
808 int rs_index, mac_index, i;
809 struct iwl_lq_sta *lq_sta = priv_sta;
810 struct iwl_link_quality_cmd *table;
811 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
812 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
813 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
814 enum mac80211_rate_control_flags mac_flags;
815 u32 tx_rate;
816 struct iwl_scale_tbl_info tbl_type;
817 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
818 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
819 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
820
821 IWL_DEBUG_RATE_LIMIT(priv,
822 "get frame ack response, update rate scale window\n");
823
824 /* Treat uninitialized rate scaling data same as non-existing. */
825 if (!lq_sta) {
826 IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
827 return;
828 } else if (!lq_sta->drv) {
829 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
830 return;
831 }
832
833 if (!ieee80211_is_data(hdr->frame_control) ||
834 info->flags & IEEE80211_TX_CTL_NO_ACK)
835 return;
836
837 /* This packet was aggregated but doesn't carry status info */
838 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
839 !(info->flags & IEEE80211_TX_STAT_AMPDU))
840 return;
841
842 /*
843 * Ignore this Tx frame response if its initial rate doesn't match
844 * that of latest Link Quality command. There may be stragglers
845 * from a previous Link Quality command, but we're no longer interested
846 * in those; they're either from the "active" mode while we're trying
847 * to check "search" mode, or a prior "search" mode after we've moved
848 * to a new "search" mode (which might become the new "active" mode).
849 */
850 table = &lq_sta->lq;
851 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
852 iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
853 priv->band, &tbl_type, &rs_index);
854 if (priv->band == IEEE80211_BAND_5GHZ)
855 rs_index -= IWL_FIRST_OFDM_RATE;
856 mac_flags = info->status.rates[0].flags;
857 mac_index = info->status.rates[0].idx;
858 /* For HT packets, map MCS to PLCP */
859 if (mac_flags & IEEE80211_TX_RC_MCS) {
860 mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
861 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
862 mac_index++;
863 /*
864 * mac80211 HT index is always zero-indexed; we need to move
865 * HT OFDM rates after CCK rates in 2.4 GHz band
866 */
867 if (priv->band == IEEE80211_BAND_2GHZ)
868 mac_index += IWL_FIRST_OFDM_RATE;
869 }
870 /* Here we actually compare this rate to the latest LQ command */
871 if ((mac_index < 0) ||
872 (tbl_type.is_SGI !=
873 !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
874 (tbl_type.is_ht40 !=
875 !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
876 (tbl_type.is_dup !=
877 !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
878 (tbl_type.ant_type != info->antenna_sel_tx) ||
879 (!!(tx_rate & RATE_MCS_HT_MSK) !=
880 !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
881 (!!(tx_rate & RATE_MCS_GF_MSK) !=
882 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
883 (rs_index != mac_index)) {
884 IWL_DEBUG_RATE(priv,
885 "initial rate %d does not match %d (0x%x)\n",
886 mac_index, rs_index, tx_rate);
887 /*
888 * Since the rates mismatch, the last LQ command may have failed.
889 * After IWL_MISSED_RATE_MAX mismatches, resync the uCode with
890 * the driver.
891 */
892 lq_sta->missed_rate_counter++;
893 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
894 lq_sta->missed_rate_counter = 0;
895 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
896 CMD_ASYNC, false);
897 }
898 /* Regardless, ignore this status info for outdated rate */
899 return;
900 } else
901 /* Rate did match, so reset the missed_rate_counter */
902 lq_sta->missed_rate_counter = 0;
903
904 /* Figure out if rate scale algorithm is in active or search table */
905 if (iwl4965_table_type_matches(&tbl_type,
906 &(lq_sta->lq_info[lq_sta->active_tbl]))) {
907 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
908 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
909 } else if (iwl4965_table_type_matches(&tbl_type,
910 &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
911 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
912 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
913 } else {
914 IWL_DEBUG_RATE(priv,
915 "Neither active nor search matches tx rate\n");
916 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
917 IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
918 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
919 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
920 IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
921 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
922 IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
923 tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
924 /*
925 * No matching table found; bypass the data collection
926 * and continue to perform rate scaling to find the rate table.
927 */
928 iwl4965_rs_stay_in_table(lq_sta, true);
929 goto done;
930 }
931
932 /*
933 * Updating the frame history depends on whether packets were
934 * aggregated.
935 *
936 * For aggregation, all packets were transmitted at the same rate, the
937 * first index into rate scale table.
938 */
939 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
940 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
941 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
942 &rs_index);
943 iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
944 info->status.ampdu_len,
945 info->status.ampdu_ack_len);
946
947 /* Update success/fail counts if not searching for new mode */
948 if (lq_sta->stay_in_tbl) {
949 lq_sta->total_success += info->status.ampdu_ack_len;
950 lq_sta->total_failed += (info->status.ampdu_len -
951 info->status.ampdu_ack_len);
952 }
953 } else {
954 /*
955 * For legacy, update frame history for each Tx retry.
956 */
957 retries = info->status.rates[0].count - 1;
958 /* HW doesn't send more than 15 retries */
959 retries = min(retries, 15);
960
961 /* The last transmission may have been successful */
962 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
963 /* Collect data for each rate used during failed TX attempts */
964 for (i = 0; i <= retries; ++i) {
965 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
966 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
967 &tbl_type, &rs_index);
968 /*
969 * Only collect stats if retried rate is in the same RS
970 * table as active/search.
971 */
972 if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
973 tmp_tbl = curr_tbl;
974 else if (iwl4965_table_type_matches(&tbl_type,
975 other_tbl))
976 tmp_tbl = other_tbl;
977 else
978 continue;
979 iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
980 i < retries ? 0 : legacy_success);
981 }
982
983 /* Update success/fail counts if not searching for new mode */
984 if (lq_sta->stay_in_tbl) {
985 lq_sta->total_success += legacy_success;
986 lq_sta->total_failed += retries + (1 - legacy_success);
987 }
988 }
989 /* The last TX rate is cached in lq_sta; it's set in if/else above */
990 lq_sta->last_rate_n_flags = tx_rate;
991done:
992 /* See if there's a better rate or modulation mode to try. */
993 if (sta && sta->supp_rates[sband->band])
994 iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
995}
996
997/*
998 * Begin a period of staying with a selected modulation mode.
999 * Set "stay_in_tbl" flag to prevent any mode switches.
1000 * Set frame tx success limits according to legacy vs. high-throughput,
1001 * and reset overall (spanning all rates) tx success history statistics.
1002 * These control how long we stay using same modulation mode before
1003 * searching for a new mode.
1004 */
1005static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1006 struct iwl_lq_sta *lq_sta)
1007{
1008 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1009 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1010 if (is_legacy) {
1011 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1012 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1013 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1014 } else {
1015 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1016 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1017 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1018 }
1019 lq_sta->table_count = 0;
1020 lq_sta->total_failed = 0;
1021 lq_sta->total_success = 0;
1022 lq_sta->flush_timer = jiffies;
1023 lq_sta->action_counter = 0;
1024}
1025
1026/*
1027 * Find correct throughput table for given mode of modulation
1028 */
1029static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1030 struct iwl_scale_tbl_info *tbl)
1031{
1032 /* Used to choose among HT tables */
1033 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1034
1035 /* Check for invalid LQ type */
1036 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1037 tbl->expected_tpt = expected_tpt_legacy;
1038 return;
1039 }
1040
1041 /* Legacy rates have only one table */
1042 if (is_legacy(tbl->lq_type)) {
1043 tbl->expected_tpt = expected_tpt_legacy;
1044 return;
1045 }
1046
1047 /* Choose among many HT tables depending on number of streams
1048 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1049 * status */
1050 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1051 ht_tbl_pointer = expected_tpt_siso20MHz;
1052 else if (is_siso(tbl->lq_type))
1053 ht_tbl_pointer = expected_tpt_siso40MHz;
1054 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1055 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1056 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1057 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1058
1059 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1060 tbl->expected_tpt = ht_tbl_pointer[0];
1061 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1062 tbl->expected_tpt = ht_tbl_pointer[1];
1063 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1064 tbl->expected_tpt = ht_tbl_pointer[2];
1065 else /* AGG+SGI */
1066 tbl->expected_tpt = ht_tbl_pointer[3];
1067}
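/*
 * Illustrative sketch (not part of the driver): the if/else ladder above
 * picks one of the four rows (Norm, SGI, AGG, AGG+SGI) of the HT
 * expected-throughput tables.  An equivalent, hypothetical way to express
 * that choice is a 2-bit row index, as sketched below.
 */
#if 0
static s32 *example_pick_tpt_row(s32 (*ht_tbl)[IWL_RATE_COUNT],
				 int is_sgi, int is_agg)
{
	/* bit 0: short guard interval, bit 1: aggregation */
	int row = (is_sgi ? 1 : 0) | (is_agg ? 2 : 0);

	return ht_tbl[row];	/* row 0=Norm, 1=SGI, 2=AGG, 3=AGG+SGI */
}
#endif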
1068
1069/*
1070 * Find starting rate for new "search" high-throughput mode of modulation.
1071 * Goal is to find lowest expected rate (under perfect conditions) that is
1072 * above the current measured throughput of "active" mode, to give new mode
1073 * a fair chance to prove itself without too many challenges.
1074 *
1075 * This gets called when transitioning to more aggressive modulation
1076 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1077 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1078 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1079 * bit rate will typically need to increase, but not if performance was bad.
1080 */
1081static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
1082 struct iwl_lq_sta *lq_sta,
1083 struct iwl_scale_tbl_info *tbl, /* "search" */
1084 u16 rate_mask, s8 index)
1085{
1086 /* "active" values */
1087 struct iwl_scale_tbl_info *active_tbl =
1088 &(lq_sta->lq_info[lq_sta->active_tbl]);
1089 s32 active_sr = active_tbl->win[index].success_ratio;
1090 s32 active_tpt = active_tbl->expected_tpt[index];
1091
1092 /* expected "search" throughput */
1093 s32 *tpt_tbl = tbl->expected_tpt;
1094
1095 s32 new_rate, high, low, start_hi;
1096 u16 high_low;
1097 s8 rate = index;
1098
1099 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1100
1101 for (; ;) {
1102 high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
1103 tbl->lq_type);
1104
1105 low = high_low & 0xff;
1106 high = (high_low >> 8) & 0xff;
1107
1108 /*
1109 * Lower the "search" bit rate, to give new "search" mode
1110 * approximately the same throughput as "active" if:
1111 *
1112 * 1) "Active" mode has been working modestly well (but not
1113 * great), and expected "search" throughput (under perfect
1114 * conditions) at candidate rate is above the actual
1115 * measured "active" throughput (but less than expected
1116 * "active" throughput under perfect conditions).
1117 * OR
1118 * 2) "Active" mode has been working perfectly or very well
1119 * and expected "search" throughput (under perfect
1120 * conditions) at candidate rate is above expected
1121 * "active" throughput (under perfect conditions).
1122 */
1123 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1124 ((active_sr > IWL_RATE_DECREASE_TH) &&
1125 (active_sr <= IWL_RATE_HIGH_TH) &&
1126 (tpt_tbl[rate] <= active_tpt))) ||
1127 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1128 (tpt_tbl[rate] > active_tpt))) {
1129
1130 /* (2nd or later pass)
1131 * If we've already tried to raise the rate, and are
1132 * now trying to lower it, use the higher rate. */
1133 if (start_hi != IWL_RATE_INVALID) {
1134 new_rate = start_hi;
1135 break;
1136 }
1137
1138 new_rate = rate;
1139
1140 /* Loop again with lower rate */
1141 if (low != IWL_RATE_INVALID)
1142 rate = low;
1143
1144 /* Lower rate not available, use the original */
1145 else
1146 break;
1147
1148 /* Else try to raise the "search" rate to match "active" */
1149 } else {
1150 /* (2nd or later pass)
1151 * If we've already tried to lower the rate, and are
1152 * now trying to raise it, use the lower rate. */
1153 if (new_rate != IWL_RATE_INVALID)
1154 break;
1155
1156 /* Loop again with higher rate */
1157 else if (high != IWL_RATE_INVALID) {
1158 start_hi = high;
1159 rate = high;
1160
1161 /* Higher rate not available, use the original */
1162 } else {
1163 new_rate = rate;
1164 break;
1165 }
1166 }
1167 }
1168
1169 return new_rate;
1170}
1171
1172/*
1173 * Set up search table for MIMO2
1174 */
1175static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
1176 struct iwl_lq_sta *lq_sta,
1177 struct ieee80211_conf *conf,
1178 struct ieee80211_sta *sta,
1179 struct iwl_scale_tbl_info *tbl, int index)
1180{
1181 u16 rate_mask;
1182 s32 rate;
1183 s8 is_green = lq_sta->is_green;
1184 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1185 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1186
1187 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1188 return -1;
1189
1190 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1191 == WLAN_HT_CAP_SM_PS_STATIC)
1192 return -1;
1193
1194 /* Need both Tx chains/antennas to support MIMO */
1195 if (priv->hw_params.tx_chains_num < 2)
1196 return -1;
1197
1198 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1199
1200 tbl->lq_type = LQ_MIMO2;
1201 tbl->is_dup = lq_sta->is_dup;
1202 tbl->action = 0;
1203 tbl->max_search = IWL_MAX_SEARCH;
1204 rate_mask = lq_sta->active_mimo2_rate;
1205
1206 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1207 tbl->is_ht40 = 1;
1208 else
1209 tbl->is_ht40 = 0;
1210
1211 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1212
1213 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1214
1215 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
1216 rate, rate_mask);
1217 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1218 IWL_DEBUG_RATE(priv,
1219 "Can't switch with index %d rate mask %x\n",
1220 rate, rate_mask);
1221 return -1;
1222 }
1223 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1224 tbl, rate, is_green);
1225
1226 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1227 tbl->current_rate, is_green);
1228 return 0;
1229}
1230
1231/*
1232 * Set up search table for SISO
1233 */
1234static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
1235 struct iwl_lq_sta *lq_sta,
1236 struct ieee80211_conf *conf,
1237 struct ieee80211_sta *sta,
1238 struct iwl_scale_tbl_info *tbl, int index)
1239{
1240 u16 rate_mask;
1241 u8 is_green = lq_sta->is_green;
1242 s32 rate;
1243 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1244 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1245
1246 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1247 return -1;
1248
1249 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1250
1251 tbl->is_dup = lq_sta->is_dup;
1252 tbl->lq_type = LQ_SISO;
1253 tbl->action = 0;
1254 tbl->max_search = IWL_MAX_SEARCH;
1255 rate_mask = lq_sta->active_siso_rate;
1256
1257 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1258 tbl->is_ht40 = 1;
1259 else
1260 tbl->is_ht40 = 0;
1261
1262 if (is_green)
1263 tbl->is_SGI = 0; /* 11n spec: no SGI in SISO+Greenfield */
1264
1265 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1266 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1267
1268 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1269 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1270 IWL_DEBUG_RATE(priv,
1271 "can not switch with index %d rate mask %x\n",
1272 rate, rate_mask);
1273 return -1;
1274 }
1275 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1276 tbl, rate, is_green);
1277 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1278 tbl->current_rate, is_green);
1279 return 0;
1280}
1281
1282/*
1283 * Try to switch to new modulation mode from legacy
1284 */
1285static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
1286 struct iwl_lq_sta *lq_sta,
1287 struct ieee80211_conf *conf,
1288 struct ieee80211_sta *sta,
1289 int index)
1290{
1291 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1292 struct iwl_scale_tbl_info *search_tbl =
1293 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1294 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1295 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1296 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1297 u8 start_action;
1298 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1299 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1300 int ret = 0;
1301 u8 update_search_tbl_counter = 0;
1302
1303 tbl->action = IWL_LEGACY_SWITCH_SISO;
1304
1305 start_action = tbl->action;
1306 for (; ;) {
1307 lq_sta->action_counter++;
1308 switch (tbl->action) {
1309 case IWL_LEGACY_SWITCH_ANTENNA1:
1310 case IWL_LEGACY_SWITCH_ANTENNA2:
1311 IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
1312
1313 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1314 tx_chains_num <= 1) ||
1315 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1316 tx_chains_num <= 2))
1317 break;
1318
1319 /* Don't change antenna if success has been great */
1320 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1321 break;
1322
1323 /* Set up search table to try other antenna */
1324 memcpy(search_tbl, tbl, sz);
1325
1326 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1327 &search_tbl->current_rate, search_tbl)) {
1328 update_search_tbl_counter = 1;
1329 iwl4965_rs_set_expected_tpt_table(lq_sta,
1330 search_tbl);
1331 goto out;
1332 }
1333 break;
1334 case IWL_LEGACY_SWITCH_SISO:
1335 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
1336
1337 /* Set up search table to try SISO */
1338 memcpy(search_tbl, tbl, sz);
1339 search_tbl->is_SGI = 0;
1340 ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
1341 search_tbl, index);
1342 if (!ret) {
1343 lq_sta->action_counter = 0;
1344 goto out;
1345 }
1346
1347 break;
1348 case IWL_LEGACY_SWITCH_MIMO2_AB:
1349 case IWL_LEGACY_SWITCH_MIMO2_AC:
1350 case IWL_LEGACY_SWITCH_MIMO2_BC:
1351 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
1352
1353 /* Set up search table to try MIMO */
1354 memcpy(search_tbl, tbl, sz);
1355 search_tbl->is_SGI = 0;
1356
1357 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1358 search_tbl->ant_type = ANT_AB;
1359 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1360 search_tbl->ant_type = ANT_AC;
1361 else
1362 search_tbl->ant_type = ANT_BC;
1363
1364 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1365 search_tbl->ant_type))
1366 break;
1367
1368 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1369 conf, sta,
1370 search_tbl, index);
1371 if (!ret) {
1372 lq_sta->action_counter = 0;
1373 goto out;
1374 }
1375 break;
1376 }
1377 tbl->action++;
1378 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1379 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1380
1381 if (tbl->action == start_action)
1382 break;
1383
1384 }
1385 search_tbl->lq_type = LQ_NONE;
1386 return 0;
1387
1388out:
1389 lq_sta->search_better_tbl = 1;
1390 tbl->action++;
1391 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1392 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1393 if (update_search_tbl_counter)
1394 search_tbl->action = tbl->action;
1395 return 0;
1396
1397}
1398
1399/*
1400 * Try to switch to new modulation mode from SISO
1401 */
1402static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
1403 struct iwl_lq_sta *lq_sta,
1404 struct ieee80211_conf *conf,
1405 struct ieee80211_sta *sta, int index)
1406{
1407 u8 is_green = lq_sta->is_green;
1408 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1409 struct iwl_scale_tbl_info *search_tbl =
1410 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1411 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1412 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1413 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1414 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1415 u8 start_action;
1416 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1417 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1418 u8 update_search_tbl_counter = 0;
1419 int ret;
1420
1421 start_action = tbl->action;
1422
1423 for (;;) {
1424 lq_sta->action_counter++;
1425 switch (tbl->action) {
1426 case IWL_SISO_SWITCH_ANTENNA1:
1427 case IWL_SISO_SWITCH_ANTENNA2:
1428 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1429 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1430 tx_chains_num <= 1) ||
1431 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1432 tx_chains_num <= 2))
1433 break;
1434
1435 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1436 break;
1437
1438 memcpy(search_tbl, tbl, sz);
1439 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1440 &search_tbl->current_rate, search_tbl)) {
1441 update_search_tbl_counter = 1;
1442 goto out;
1443 }
1444 break;
1445 case IWL_SISO_SWITCH_MIMO2_AB:
1446 case IWL_SISO_SWITCH_MIMO2_AC:
1447 case IWL_SISO_SWITCH_MIMO2_BC:
1448 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
1449 memcpy(search_tbl, tbl, sz);
1450 search_tbl->is_SGI = 0;
1451
1452 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1453 search_tbl->ant_type = ANT_AB;
1454 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1455 search_tbl->ant_type = ANT_AC;
1456 else
1457 search_tbl->ant_type = ANT_BC;
1458
1459 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1460 search_tbl->ant_type))
1461 break;
1462
1463 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1464 conf, sta,
1465 search_tbl, index);
1466 if (!ret)
1467 goto out;
1468 break;
1469 case IWL_SISO_SWITCH_GI:
1470 if (!tbl->is_ht40 && !(ht_cap->cap &
1471 IEEE80211_HT_CAP_SGI_20))
1472 break;
1473 if (tbl->is_ht40 && !(ht_cap->cap &
1474 IEEE80211_HT_CAP_SGI_40))
1475 break;
1476
1477 IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
1478
1479 memcpy(search_tbl, tbl, sz);
1480 if (is_green) {
1481 if (!tbl->is_SGI)
1482 break;
1483 else
1484 IWL_ERR(priv,
1485 "SGI was set in GF+SISO\n");
1486 }
1487 search_tbl->is_SGI = !tbl->is_SGI;
1488 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1489 if (tbl->is_SGI) {
1490 s32 tpt = lq_sta->last_tpt / 100;
1491 if (tpt >= search_tbl->expected_tpt[index])
1492 break;
1493 }
1494 search_tbl->current_rate =
1495 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1496 index, is_green);
1497 update_search_tbl_counter = 1;
1498 goto out;
1499 }
1500 tbl->action++;
1501 if (tbl->action > IWL_SISO_SWITCH_GI)
1502 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1503
1504 if (tbl->action == start_action)
1505 break;
1506 }
1507 search_tbl->lq_type = LQ_NONE;
1508 return 0;
1509
1510 out:
1511 lq_sta->search_better_tbl = 1;
1512 tbl->action++;
1513 if (tbl->action > IWL_SISO_SWITCH_GI)
1514 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1515 if (update_search_tbl_counter)
1516 search_tbl->action = tbl->action;
1517
1518 return 0;
1519}
1520
1521/*
1522 * Try to switch to new modulation mode from MIMO2
1523 */
1524static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
1525 struct iwl_lq_sta *lq_sta,
1526 struct ieee80211_conf *conf,
1527 struct ieee80211_sta *sta, int index)
1528{
1529 s8 is_green = lq_sta->is_green;
1530 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1531 struct iwl_scale_tbl_info *search_tbl =
1532 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1533 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1534 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1535 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1536 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1537 u8 start_action;
1538 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1539 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1540 u8 update_search_tbl_counter = 0;
1541 int ret;
1542
1543 start_action = tbl->action;
1544 for (;;) {
1545 lq_sta->action_counter++;
1546 switch (tbl->action) {
1547 case IWL_MIMO2_SWITCH_ANTENNA1:
1548 case IWL_MIMO2_SWITCH_ANTENNA2:
1549 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
1550
1551 if (tx_chains_num <= 2)
1552 break;
1553
1554 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1555 break;
1556
1557 memcpy(search_tbl, tbl, sz);
1558 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1559 &search_tbl->current_rate, search_tbl)) {
1560 update_search_tbl_counter = 1;
1561 goto out;
1562 }
1563 break;
1564 case IWL_MIMO2_SWITCH_SISO_A:
1565 case IWL_MIMO2_SWITCH_SISO_B:
1566 case IWL_MIMO2_SWITCH_SISO_C:
1567 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
1568
1569 /* Set up new search table for SISO */
1570 memcpy(search_tbl, tbl, sz);
1571
1572 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1573 search_tbl->ant_type = ANT_A;
1574 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1575 search_tbl->ant_type = ANT_B;
1576 else
1577 search_tbl->ant_type = ANT_C;
1578
1579 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1580 search_tbl->ant_type))
1581 break;
1582
1583 ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
1584 conf, sta,
1585 search_tbl, index);
1586 if (!ret)
1587 goto out;
1588
1589 break;
1590
1591 case IWL_MIMO2_SWITCH_GI:
1592 if (!tbl->is_ht40 && !(ht_cap->cap &
1593 IEEE80211_HT_CAP_SGI_20))
1594 break;
1595 if (tbl->is_ht40 && !(ht_cap->cap &
1596 IEEE80211_HT_CAP_SGI_40))
1597 break;
1598
1599 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
1600
1601 /* Set up new search table for MIMO2 */
1602 memcpy(search_tbl, tbl, sz);
1603 search_tbl->is_SGI = !tbl->is_SGI;
1604 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1605 /*
1606 * If active table already uses the fastest possible
1607 * modulation (dual stream with short guard interval),
1608 * and it's working well, there's no need to look
1609 * for a better type of modulation!
1610 */
1611 if (tbl->is_SGI) {
1612 s32 tpt = lq_sta->last_tpt / 100;
1613 if (tpt >= search_tbl->expected_tpt[index])
1614 break;
1615 }
1616 search_tbl->current_rate =
1617 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1618 index, is_green);
1619 update_search_tbl_counter = 1;
1620 goto out;
1621
1622 }
1623 tbl->action++;
1624 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1625 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1626
1627 if (tbl->action == start_action)
1628 break;
1629 }
1630 search_tbl->lq_type = LQ_NONE;
1631 return 0;
1632 out:
1633 lq_sta->search_better_tbl = 1;
1634 tbl->action++;
1635 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1636 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1637 if (update_search_tbl_counter)
1638 search_tbl->action = tbl->action;
1639
1640 return 0;
1641
1642}
1643
1644/*
1645 * Check whether we should continue using the same modulation mode, or
1646 * begin searching for a new mode, based on:
1647 * 1) # of tx successes or failures while using this mode
1648 * 2) # of times this function has been called
1649 * 3) elapsed time in this mode (not used, for now)
1650 */
1651static void
1652iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1653{
1654 struct iwl_scale_tbl_info *tbl;
1655 int i;
1656 int active_tbl;
1657 int flush_interval_passed = 0;
1658 struct iwl_priv *priv;
1659
1660 priv = lq_sta->drv;
1661 active_tbl = lq_sta->active_tbl;
1662
1663 tbl = &(lq_sta->lq_info[active_tbl]);
1664
1665 /* If we've been disallowing search, see if we should now allow it */
1666 if (lq_sta->stay_in_tbl) {
1667
1668 /* Elapsed time using current modulation mode */
1669 if (lq_sta->flush_timer)
1670 flush_interval_passed =
1671 time_after(jiffies,
1672 (unsigned long)(lq_sta->flush_timer +
1673 IWL_RATE_SCALE_FLUSH_INTVL));
1674
1675 /*
1676 * Check if we should allow search for new modulation mode.
1677 * If many frames have failed or succeeded, or we've used
1678 * this same modulation for a long time, allow search, and
1679 * reset history stats that keep track of whether we should
1680 * allow a new search. Also (below) reset all bitmaps and
1681 * stats in active history.
1682 */
1683 if (force_search ||
1684 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1685 (lq_sta->total_success > lq_sta->max_success_limit) ||
1686 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1687 && (flush_interval_passed))) {
1688 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
1689 lq_sta->total_failed,
1690 lq_sta->total_success,
1691 flush_interval_passed);
1692
1693 /* Allow search for new mode */
1694 lq_sta->stay_in_tbl = 0; /* only place reset */
1695 lq_sta->total_failed = 0;
1696 lq_sta->total_success = 0;
1697 lq_sta->flush_timer = 0;
1698
1699 /*
1700 * Else if we've used this modulation mode enough repetitions
1701 * (regardless of elapsed time or success/failure), reset
1702 * history bitmaps and rate-specific stats for all rates in
1703 * active table.
1704 */
1705 } else {
1706 lq_sta->table_count++;
1707 if (lq_sta->table_count >=
1708 lq_sta->table_count_limit) {
1709 lq_sta->table_count = 0;
1710
1711 IWL_DEBUG_RATE(priv,
1712 "LQ: stay in table clear win\n");
1713 for (i = 0; i < IWL_RATE_COUNT; i++)
1714 iwl4965_rs_rate_scale_clear_window(
1715 &(tbl->win[i]));
1716 }
1717 }
1718
1719 /* If transitioning to allow "search", reset all history
1720 * bitmaps and stats in active table (this will become the new
1721 * "search" table). */
1722 if (!lq_sta->stay_in_tbl) {
1723 for (i = 0; i < IWL_RATE_COUNT; i++)
1724 iwl4965_rs_rate_scale_clear_window(
1725 &(tbl->win[i]));
1726 }
1727 }
1728}
1729
1730/*
1731 * setup rate table in uCode
1732 * return rate_n_flags as used in the table
1733 */
1734static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
1735 struct iwl_rxon_context *ctx,
1736 struct iwl_lq_sta *lq_sta,
1737 struct iwl_scale_tbl_info *tbl,
1738 int index, u8 is_green)
1739{
1740 u32 rate;
1741
1742 /* Update uCode's rate table. */
1743 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
1744 iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
1745 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1746
1747 return rate;
1748}
1749
1750/*
1751 * Do rate scaling and search for new modulation mode.
1752 */
1753static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
1754 struct sk_buff *skb,
1755 struct ieee80211_sta *sta,
1756 struct iwl_lq_sta *lq_sta)
1757{
1758 struct ieee80211_hw *hw = priv->hw;
1759 struct ieee80211_conf *conf = &hw->conf;
1760 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1761 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1762 int low = IWL_RATE_INVALID;
1763 int high = IWL_RATE_INVALID;
1764 int index;
1765 int i;
1766 struct iwl_rate_scale_data *window = NULL;
1767 int current_tpt = IWL_INVALID_VALUE;
1768 int low_tpt = IWL_INVALID_VALUE;
1769 int high_tpt = IWL_INVALID_VALUE;
1770 u32 fail_count;
1771 s8 scale_action = 0;
1772 u16 rate_mask;
1773 u8 update_lq = 0;
1774 struct iwl_scale_tbl_info *tbl, *tbl1;
1775 u16 rate_scale_index_msk = 0;
1776 u32 rate;
1777 u8 is_green = 0;
1778 u8 active_tbl = 0;
1779 u8 done_search = 0;
1780 u16 high_low;
1781 s32 sr;
1782 u8 tid = MAX_TID_COUNT;
1783 struct iwl_tid_data *tid_data;
1784 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1785 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1786
1787 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1788
1789 /* Send management frames and NO_ACK data using lowest rate. */
1790 /* TODO: this could probably be improved.. */
1791 if (!ieee80211_is_data(hdr->frame_control) ||
1792 info->flags & IEEE80211_TX_CTL_NO_ACK)
1793 return;
1794
1795 if (!sta || !lq_sta)
1796 return;
1797
1798 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1799
1800 tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
1801 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1802 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
1803 if (tid_data->agg.state == IWL_AGG_OFF)
1804 lq_sta->is_agg = 0;
1805 else
1806 lq_sta->is_agg = 1;
1807 } else
1808 lq_sta->is_agg = 0;
1809
1810 /*
1811 * Select rate-scale / modulation-mode table to work with in
1812 * the rest of this function: "search" if searching for better
1813 * modulation mode, or "active" if doing rate scaling within a mode.
1814 */
1815 if (!lq_sta->search_better_tbl)
1816 active_tbl = lq_sta->active_tbl;
1817 else
1818 active_tbl = 1 - lq_sta->active_tbl;
1819
1820 tbl = &(lq_sta->lq_info[active_tbl]);
1821 if (is_legacy(tbl->lq_type))
1822 lq_sta->is_green = 0;
1823 else
1824 lq_sta->is_green = iwl4965_rs_use_green(sta);
1825 is_green = lq_sta->is_green;
1826
1827 /* current tx rate */
1828 index = lq_sta->last_txrate_idx;
1829
1830 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
1831 tbl->lq_type);
1832
1833 /* rates available for this association, and for modulation mode */
1834 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1835
1836 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
1837
1838 /* mask with station rate restriction */
1839 if (is_legacy(tbl->lq_type)) {
1840 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1841 /* supp_rates has no CCK bits in A mode */
1842 rate_scale_index_msk = (u16) (rate_mask &
1843 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
1844 else
1845 rate_scale_index_msk = (u16) (rate_mask &
1846 lq_sta->supp_rates);
1847
1848 } else
1849 rate_scale_index_msk = rate_mask;
1850
1851 if (!rate_scale_index_msk)
1852 rate_scale_index_msk = rate_mask;
1853
1854 if (!((1 << index) & rate_scale_index_msk)) {
1855 IWL_ERR(priv, "Current Rate is not valid\n");
1856 if (lq_sta->search_better_tbl) {
1857 /* revert to active table if search table is not valid*/
1858 tbl->lq_type = LQ_NONE;
1859 lq_sta->search_better_tbl = 0;
1860 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1861 /* get "active" rate info */
1862 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1863 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
1864 tbl, index, is_green);
1865 }
1866 return;
1867 }
1868
1869 /* Get expected throughput table and history window for current rate */
1870 if (!tbl->expected_tpt) {
1871 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
1872 return;
1873 }
1874
1875 /* force user max rate if set by user */
1876 if ((lq_sta->max_rate_idx != -1) &&
1877 (lq_sta->max_rate_idx < index)) {
1878 index = lq_sta->max_rate_idx;
1879 update_lq = 1;
1880 window = &(tbl->win[index]);
1881 goto lq_update;
1882 }
1883
1884 window = &(tbl->win[index]);
1885
1886 /*
1887 * If there is not enough history to calculate actual average
1888 * throughput, keep analyzing results of more tx frames, without
1889 * changing rate or mode (bypass most of the rest of this function).
1890 * Set up new rate table in uCode only if old rate is not supported
1891 * in current association (use new rate found above).
1892 */
1893 fail_count = window->counter - window->success_counter;
1894 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1895 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1896 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
1897 "for index %d\n",
1898 window->success_counter, window->counter, index);
1899
1900 /* Can't calculate this yet; not enough history */
1901 window->average_tpt = IWL_INVALID_VALUE;
1902
1903 /* Should we stay with this modulation mode,
1904 * or search for a new one? */
1905 iwl4965_rs_stay_in_table(lq_sta, false);
1906
1907 goto out;
1908 }
1909 /* Else we have enough samples; calculate estimate of
1910 * actual average throughput */
1911 if (window->average_tpt != ((window->success_ratio *
1912 tbl->expected_tpt[index] + 64) / 128)) {
1913 IWL_ERR(priv,
1914 "expected_tpt should have been calculated by now\n");
1915 window->average_tpt = ((window->success_ratio *
1916 tbl->expected_tpt[index] + 64) / 128);
1917 }
1918
1919 /* If we are searching for better modulation mode, check success. */
1920 if (lq_sta->search_better_tbl) {
1921 /* If good success, continue using the "search" mode;
1922 * no need to send new link quality command, since we're
1923 * continuing to use the setup that we've been trying. */
1924 if (window->average_tpt > lq_sta->last_tpt) {
1925
1926 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
1927 "suc=%d cur-tpt=%d old-tpt=%d\n",
1928 window->success_ratio,
1929 window->average_tpt,
1930 lq_sta->last_tpt);
1931
1932 if (!is_legacy(tbl->lq_type))
1933 lq_sta->enable_counter = 1;
1934
1935 /* Swap tables; "search" becomes "active" */
1936 lq_sta->active_tbl = active_tbl;
1937 current_tpt = window->average_tpt;
1938
1939 /* Else poor success; go back to mode in "active" table */
1940 } else {
1941
1942 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
1943 "suc=%d cur-tpt=%d old-tpt=%d\n",
1944 window->success_ratio,
1945 window->average_tpt,
1946 lq_sta->last_tpt);
1947
1948 /* Nullify "search" table */
1949 tbl->lq_type = LQ_NONE;
1950
1951 /* Revert to "active" table */
1952 active_tbl = lq_sta->active_tbl;
1953 tbl = &(lq_sta->lq_info[active_tbl]);
1954
1955 /* Revert to "active" rate and throughput info */
1956 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1957 current_tpt = lq_sta->last_tpt;
1958
1959 /* Need to set up a new rate table in uCode */
1960 update_lq = 1;
1961 }
1962
1963 /* Either way, we've made a decision; modulation mode
1964 * search is done, allow rate adjustment next time. */
1965 lq_sta->search_better_tbl = 0;
1966 done_search = 1; /* Don't switch modes below! */
1967 goto lq_update;
1968 }
1969
1970 /* (Else) not in search of better modulation mode, try for better
1971 * starting rate, while staying in this mode. */
1972 high_low = iwl4965_rs_get_adjacent_rate(priv, index,
1973 rate_scale_index_msk,
1974 tbl->lq_type);
1975 low = high_low & 0xff;
1976 high = (high_low >> 8) & 0xff;
1977
1978	/* If user set a max rate, don't allow higher than the user constraint */
1979 if ((lq_sta->max_rate_idx != -1) &&
1980 (lq_sta->max_rate_idx < high))
1981 high = IWL_RATE_INVALID;
1982
1983 sr = window->success_ratio;
1984
1985 /* Collect measured throughputs for current and adjacent rates */
1986 current_tpt = window->average_tpt;
1987 if (low != IWL_RATE_INVALID)
1988 low_tpt = tbl->win[low].average_tpt;
1989 if (high != IWL_RATE_INVALID)
1990 high_tpt = tbl->win[high].average_tpt;
1991
1992 scale_action = 0;
1993
1994 /* Too many failures, decrease rate */
1995 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1996 IWL_DEBUG_RATE(priv,
1997 "decrease rate because of low success_ratio\n");
1998 scale_action = -1;
1999
2000	/* No throughput measured yet for adjacent rates; try increasing. */
2001 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2002 (high_tpt == IWL_INVALID_VALUE)) {
2003
2004 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2005 scale_action = 1;
2006 else if (low != IWL_RATE_INVALID)
2007 scale_action = 0;
2008 }
2009
2010 /* Both adjacent throughputs are measured, but neither one has better
2011 * throughput; we're using the best rate, don't change it! */
2012 else if ((low_tpt != IWL_INVALID_VALUE) &&
2013 (high_tpt != IWL_INVALID_VALUE) &&
2014 (low_tpt < current_tpt) &&
2015 (high_tpt < current_tpt))
2016 scale_action = 0;
2017
2018 /* At least one adjacent rate's throughput is measured,
2019 * and may have better performance. */
2020 else {
2021 /* Higher adjacent rate's throughput is measured */
2022 if (high_tpt != IWL_INVALID_VALUE) {
2023 /* Higher rate has better throughput */
2024 if (high_tpt > current_tpt &&
2025 sr >= IWL_RATE_INCREASE_TH) {
2026 scale_action = 1;
2027 } else {
2028 scale_action = 0;
2029 }
2030
2031 /* Lower adjacent rate's throughput is measured */
2032 } else if (low_tpt != IWL_INVALID_VALUE) {
2033 /* Lower rate has better throughput */
2034 if (low_tpt > current_tpt) {
2035 IWL_DEBUG_RATE(priv,
2036 "decrease rate because of low tpt\n");
2037 scale_action = -1;
2038 } else if (sr >= IWL_RATE_INCREASE_TH) {
2039 scale_action = 1;
2040 }
2041 }
2042 }
2043
2044 /* Sanity check; asked for decrease, but success rate or throughput
2045 * has been good at old rate. Don't change it. */
2046 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2047 ((sr > IWL_RATE_HIGH_TH) ||
2048 (current_tpt > (100 * tbl->expected_tpt[low]))))
2049 scale_action = 0;
2050
2051 switch (scale_action) {
2052 case -1:
2053 /* Decrease starting rate, update uCode's rate table */
2054 if (low != IWL_RATE_INVALID) {
2055 update_lq = 1;
2056 index = low;
2057 }
2058
2059 break;
2060 case 1:
2061 /* Increase starting rate, update uCode's rate table */
2062 if (high != IWL_RATE_INVALID) {
2063 update_lq = 1;
2064 index = high;
2065 }
2066
2067 break;
2068 case 0:
2069 /* No change */
2070 default:
2071 break;
2072 }
2073
2074 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2075 "high %d type %d\n",
2076 index, scale_action, low, high, tbl->lq_type);
2077
2078lq_update:
2079 /* Replace uCode's rate table for the destination station. */
2080 if (update_lq)
2081 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
2082 tbl, index, is_green);
2083
2084 /* Should we stay with this modulation mode,
2085 * or search for a new one? */
2086 iwl4965_rs_stay_in_table(lq_sta, false);
2087
2088	/*
2089	 * Search for a new modulation mode if we're:
2090	 * 1) Not changing rates right now
2091	 * 2) Not just finishing up a search
2092	 * 3) Allowing a new search, and have sent frames in this window
2093	 */
2094 if (!update_lq && !done_search &&
2095 !lq_sta->stay_in_tbl && window->counter) {
2096 /* Save current throughput to compare with "search" throughput*/
2097 lq_sta->last_tpt = current_tpt;
2098
2099 /* Select a new "search" modulation mode to try.
2100 * If one is found, set up the new "search" table. */
2101 if (is_legacy(tbl->lq_type))
2102 iwl4965_rs_move_legacy_other(priv, lq_sta,
2103 conf, sta, index);
2104 else if (is_siso(tbl->lq_type))
2105 iwl4965_rs_move_siso_to_other(priv, lq_sta,
2106 conf, sta, index);
2107 else /* (is_mimo2(tbl->lq_type)) */
2108 iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
2109 conf, sta, index);
2110
2111 /* If new "search" mode was selected, set up in uCode table */
2112 if (lq_sta->search_better_tbl) {
2113 /* Access the "search" table, clear its history. */
2114 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2115 for (i = 0; i < IWL_RATE_COUNT; i++)
2116 iwl4965_rs_rate_scale_clear_window(
2117 &(tbl->win[i]));
2118
2119 /* Use new "search" start rate */
2120 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
2121
2122 IWL_DEBUG_RATE(priv,
2123 "Switch current mcs: %X index: %d\n",
2124 tbl->current_rate, index);
2125 iwl4965_rs_fill_link_cmd(priv, lq_sta,
2126 tbl->current_rate);
2127 iwl_legacy_send_lq_cmd(priv, ctx,
2128 &lq_sta->lq, CMD_ASYNC, false);
2129 } else
2130 done_search = 1;
2131 }
2132
2133 if (done_search && !lq_sta->stay_in_tbl) {
2134 /* If the "active" (non-search) mode was legacy,
2135 * and we've tried switching antennas,
2136 * but we haven't been able to try HT modes (not available),
2137 * stay with best antenna legacy modulation for a while
2138 * before next round of mode comparisons. */
2139 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2140 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2141 lq_sta->action_counter > tbl1->max_search) {
2142 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2143 iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
2144 }
2145
2146 /* If we're in an HT mode, and all 3 mode switch actions
2147 * have been tried and compared, stay in this best modulation
2148 * mode for a while before next round of mode comparisons. */
2149 if (lq_sta->enable_counter &&
2150 (lq_sta->action_counter >= tbl1->max_search)) {
2151 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2152 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2153 (tid != MAX_TID_COUNT)) {
2154 tid_data =
2155 &priv->stations[lq_sta->lq.sta_id].tid[tid];
2156 if (tid_data->agg.state == IWL_AGG_OFF) {
2157 IWL_DEBUG_RATE(priv,
2158 "try to aggregate tid %d\n",
2159 tid);
2160 iwl4965_rs_tl_turn_on_agg(priv, tid,
2161 lq_sta, sta);
2162 }
2163 }
2164 iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
2165 }
2166 }
2167
2168out:
2169 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
2170 index, is_green);
2171 i = index;
2172 lq_sta->last_txrate_idx = i;
2173}
2174
2175/**
2176 * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
2177 *
2178 * The uCode's station table contains a table of fallback rates
2179 * for automatic fallback during transmission.
2180 *
2181 * NOTE: This sets up a default set of values. These will be replaced later
2182 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2183 * rc80211_simple.
2184 *
2185 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2186 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2187 * which requires station table entry to exist).
2188 */
2189static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
2190 struct ieee80211_conf *conf,
2191 struct ieee80211_sta *sta,
2192 struct iwl_lq_sta *lq_sta)
2193{
2194 struct iwl_scale_tbl_info *tbl;
2195 int rate_idx;
2196 int i;
2197 u32 rate;
2198 u8 use_green = iwl4965_rs_use_green(sta);
2199 u8 active_tbl = 0;
2200 u8 valid_tx_ant;
2201 struct iwl_station_priv *sta_priv;
2202 struct iwl_rxon_context *ctx;
2203
2204 if (!sta || !lq_sta)
2205 return;
2206
2207 sta_priv = (void *)sta->drv_priv;
2208 ctx = sta_priv->common.ctx;
2209
2210 i = lq_sta->last_txrate_idx;
2211
2212 valid_tx_ant = priv->hw_params.valid_tx_ant;
2213
2214 if (!lq_sta->search_better_tbl)
2215 active_tbl = lq_sta->active_tbl;
2216 else
2217 active_tbl = 1 - lq_sta->active_tbl;
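	/* lq_info[] holds two tables: use the one under evaluation if a
	 * search is in progress, otherwise the active one. */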
2218
2219 tbl = &(lq_sta->lq_info[active_tbl]);
2220
2221 if ((i < 0) || (i >= IWL_RATE_COUNT))
2222 i = 0;
2223
2224 rate = iwlegacy_rates[i].plcp;
2225 tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
2226 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2227
2228 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2229 rate |= RATE_MCS_CCK_MSK;
2230
2231 iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2232 if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2233 iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2234
2235 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2236 tbl->current_rate = rate;
2237 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
2238 iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
2239 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2240 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2241}
2242
2243static void
2244iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2245 struct ieee80211_tx_rate_control *txrc)
2246{
2247
2248 struct sk_buff *skb = txrc->skb;
2249 struct ieee80211_supported_band *sband = txrc->sband;
2250 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
2251 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2252 struct iwl_lq_sta *lq_sta = priv_sta;
2253 int rate_idx;
2254
2255 IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
2256
2257 /* Get max rate if user set max rate */
2258 if (lq_sta) {
2259 lq_sta->max_rate_idx = txrc->max_rate_idx;
2260 if ((sband->band == IEEE80211_BAND_5GHZ) &&
2261 (lq_sta->max_rate_idx != -1))
2262 lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
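		/* (5 GHz has no CCK rates, so mac80211's band-relative index
		 * is shifted above into the driver's full rate table.) */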
2263 if ((lq_sta->max_rate_idx < 0) ||
2264 (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
2265 lq_sta->max_rate_idx = -1;
2266 }
2267
2268	/* Treat uninitialized rate scaling data the same as non-existent. */
2269 if (lq_sta && !lq_sta->drv) {
2270 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
2271 priv_sta = NULL;
2272 }
2273
2274 /* Send management frames and NO_ACK data using lowest rate. */
2275 if (rate_control_send_low(sta, priv_sta, txrc))
2276 return;
2277
2278 rate_idx = lq_sta->last_txrate_idx;
2279
2280 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2281 rate_idx -= IWL_FIRST_OFDM_RATE;
2282		/* 6M and 9M share the same MCS index */
2283 rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
2284 if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
2285 IWL_RATE_MIMO2_6M_PLCP)
2286 rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
2287 info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
2288 if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
2289 info->control.rates[0].flags |=
2290 IEEE80211_TX_RC_SHORT_GI;
2291 if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
2292 info->control.rates[0].flags |=
2293 IEEE80211_TX_RC_DUP_DATA;
2294 if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
2295 info->control.rates[0].flags |=
2296 IEEE80211_TX_RC_40_MHZ_WIDTH;
2297 if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
2298 info->control.rates[0].flags |=
2299 IEEE80211_TX_RC_GREEN_FIELD;
2300 } else {
2301 /* Check for invalid rates */
2302 if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
2303 ((sband->band == IEEE80211_BAND_5GHZ) &&
2304 (rate_idx < IWL_FIRST_OFDM_RATE)))
2305 rate_idx = rate_lowest_index(sband, sta);
2306 /* On valid 5 GHz rate, adjust index */
2307 else if (sband->band == IEEE80211_BAND_5GHZ)
2308 rate_idx -= IWL_FIRST_OFDM_RATE;
2309 info->control.rates[0].flags = 0;
2310 }
2311 info->control.rates[0].idx = rate_idx;
2312
2313}
2314
2315static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2316 gfp_t gfp)
2317{
2318 struct iwl_lq_sta *lq_sta;
2319 struct iwl_station_priv *sta_priv =
2320 (struct iwl_station_priv *) sta->drv_priv;
2321 struct iwl_priv *priv;
2322
2323 priv = (struct iwl_priv *)priv_rate;
2324 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2325
2326 lq_sta = &sta_priv->lq_sta;
2327
2328 return lq_sta;
2329}
2330
2331/*
2332 * Called after adding a new station to initialize rate scaling
2333 */
2334void
2335iwl4965_rs_rate_init(struct iwl_priv *priv,
2336 struct ieee80211_sta *sta,
2337 u8 sta_id)
2338{
2339 int i, j;
2340 struct ieee80211_hw *hw = priv->hw;
2341 struct ieee80211_conf *conf = &priv->hw->conf;
2342 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2343 struct iwl_station_priv *sta_priv;
2344 struct iwl_lq_sta *lq_sta;
2345 struct ieee80211_supported_band *sband;
2346
2347 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2348 lq_sta = &sta_priv->lq_sta;
2349 sband = hw->wiphy->bands[conf->channel->band];
2350
2351
2352 lq_sta->lq.sta_id = sta_id;
2353
2354 for (j = 0; j < LQ_SIZE; j++)
2355 for (i = 0; i < IWL_RATE_COUNT; i++)
2356 iwl4965_rs_rate_scale_clear_window(
2357 &lq_sta->lq_info[j].win[i]);
2358
2359 lq_sta->flush_timer = 0;
2360 lq_sta->supp_rates = sta->supp_rates[sband->band];
2361 for (j = 0; j < LQ_SIZE; j++)
2362 for (i = 0; i < IWL_RATE_COUNT; i++)
2363 iwl4965_rs_rate_scale_clear_window(
2364 &lq_sta->lq_info[j].win[i]);
2365
2366 IWL_DEBUG_RATE(priv, "LQ:"
2367 "*** rate scale station global init for station %d ***\n",
2368 sta_id);
2369 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2370 * the lowest or the highest rate.. Could consider using RSSI from
2371 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2372 * after assoc.. */
2373
2374 lq_sta->is_dup = 0;
2375 lq_sta->max_rate_idx = -1;
2376 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2377 lq_sta->is_green = iwl4965_rs_use_green(sta);
2378 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2379 lq_sta->band = priv->band;
2380 /*
2381 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2382 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2383 */
2384 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2385 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2386 lq_sta->active_siso_rate &= ~((u16)0x2);
2387 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2388
2389 /* Same here */
2390 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2391 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2392 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2393 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2394
2395 /* These values will be overridden later */
2396 lq_sta->lq.general_params.single_stream_ant_msk =
2397 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2398 lq_sta->lq.general_params.dual_stream_ant_msk =
2399 priv->hw_params.valid_tx_ant &
2400 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2401 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2402 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2403 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2404 lq_sta->lq.general_params.dual_stream_ant_msk =
2405 priv->hw_params.valid_tx_ant;
2406 }
2407
2408	/* By default, allow aggregation for all TIDs */
2409 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2410 lq_sta->drv = priv;
2411
2412 /* Set last_txrate_idx to lowest rate */
2413 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2414 if (sband->band == IEEE80211_BAND_5GHZ)
2415 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2416 lq_sta->is_agg = 0;
2417
2418#ifdef CONFIG_MAC80211_DEBUGFS
2419 lq_sta->dbg_fixed_rate = 0;
2420#endif
2421
2422 iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
2423}
2424
2425static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
2426 struct iwl_lq_sta *lq_sta, u32 new_rate)
2427{
2428 struct iwl_scale_tbl_info tbl_type;
2429 int index = 0;
2430 int rate_idx;
2431 int repeat_rate = 0;
2432 u8 ant_toggle_cnt = 0;
2433 u8 use_ht_possible = 1;
2434 u8 valid_tx_ant = 0;
2435 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
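	/* Build the uCode retry table: entry 0 holds the starting rate, the
	 * remaining entries repeat it and then step down through fallback
	 * rates, toggling antennas between retries for legacy rates. */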
2436
2437 /* Override starting rate (index 0) if needed for debug purposes */
2438 iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2439
2440 /* Interpret new_rate (rate_n_flags) */
2441 iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2442 &tbl_type, &rate_idx);
2443
2444 /* How many times should we repeat the initial rate? */
2445 if (is_legacy(tbl_type.lq_type)) {
2446 ant_toggle_cnt = 1;
2447 repeat_rate = IWL_NUMBER_TRY;
2448 } else {
2449 repeat_rate = IWL_HT_NUMBER_TRY;
2450 }
2451
2452 lq_cmd->general_params.mimo_delimiter =
2453 is_mimo(tbl_type.lq_type) ? 1 : 0;
2454
2455 /* Fill 1st table entry (index 0) */
2456 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2457
2458 if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
2459 lq_cmd->general_params.single_stream_ant_msk =
2460 tbl_type.ant_type;
2461 } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
2462 lq_cmd->general_params.dual_stream_ant_msk =
2463 tbl_type.ant_type;
2464 } /* otherwise we don't modify the existing value */
2465
2466 index++;
2467 repeat_rate--;
2468 if (priv)
2469 valid_tx_ant = priv->hw_params.valid_tx_ant;
2470
2471 /* Fill rest of rate table */
2472 while (index < LINK_QUAL_MAX_RETRY_NUM) {
2473 /* Repeat initial/next rate.
2474 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
2475 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
2476 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
2477 if (is_legacy(tbl_type.lq_type)) {
2478 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2479 ant_toggle_cnt++;
2480 else if (priv &&
2481 iwl4965_rs_toggle_antenna(valid_tx_ant,
2482 &new_rate, &tbl_type))
2483 ant_toggle_cnt = 1;
2484 }
2485
2486 /* Override next rate if needed for debug purposes */
2487 iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2488
2489 /* Fill next table entry */
2490 lq_cmd->rs_table[index].rate_n_flags =
2491 cpu_to_le32(new_rate);
2492 repeat_rate--;
2493 index++;
2494 }
2495
2496 iwl4965_rs_get_tbl_info_from_mcs(new_rate,
2497 lq_sta->band, &tbl_type,
2498 &rate_idx);
2499
2500 /* Indicate to uCode which entries might be MIMO.
2501 * If initial rate was MIMO, this will finally end up
2502 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
2503 if (is_mimo(tbl_type.lq_type))
2504 lq_cmd->general_params.mimo_delimiter = index;
2505
2506 /* Get next rate */
2507 new_rate = iwl4965_rs_get_lower_rate(lq_sta,
2508 &tbl_type, rate_idx,
2509 use_ht_possible);
2510
2511 /* How many times should we repeat the next rate? */
2512 if (is_legacy(tbl_type.lq_type)) {
2513 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2514 ant_toggle_cnt++;
2515 else if (priv &&
2516 iwl4965_rs_toggle_antenna(valid_tx_ant,
2517 &new_rate, &tbl_type))
2518 ant_toggle_cnt = 1;
2519
2520 repeat_rate = IWL_NUMBER_TRY;
2521 } else {
2522 repeat_rate = IWL_HT_NUMBER_TRY;
2523 }
2524
2525 /* Don't allow HT rates after next pass.
2526 * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
2527 use_ht_possible = 0;
2528
2529 /* Override next rate if needed for debug purposes */
2530 iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2531
2532 /* Fill next table entry */
2533 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2534
2535 index++;
2536 repeat_rate--;
2537 }
2538
2539 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2540 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2541
2542 lq_cmd->agg_params.agg_time_limit =
2543 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2544}
2545
2546static void
2547*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2548{
2549 return hw->priv;
2550}
2551/* The rate scaling framework requires a free function to be implemented */
2552static void iwl4965_rs_free(void *priv_rate)
2553{
2554 return;
2555}
2556
2557static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2558 void *priv_sta)
2559{
2560 struct iwl_priv *priv __maybe_unused = priv_r;
2561
2562 IWL_DEBUG_RATE(priv, "enter\n");
2563 IWL_DEBUG_RATE(priv, "leave\n");
2564}
2565
2566
2567#ifdef CONFIG_MAC80211_DEBUGFS
2568static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
2569{
2570 file->private_data = inode->i_private;
2571 return 0;
2572}
2573static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2574 u32 *rate_n_flags, int index)
2575{
2576 struct iwl_priv *priv;
2577 u8 valid_tx_ant;
2578 u8 ant_sel_tx;
2579
2580 priv = lq_sta->drv;
2581 valid_tx_ant = priv->hw_params.valid_tx_ant;
2582 if (lq_sta->dbg_fixed_rate) {
2583 ant_sel_tx =
2584 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
2585 >> RATE_MCS_ANT_POS);
2586 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2587 *rate_n_flags = lq_sta->dbg_fixed_rate;
2588 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
2589 } else {
2590 lq_sta->dbg_fixed_rate = 0;
2591 IWL_ERR(priv,
2592 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
2593 ant_sel_tx, valid_tx_ant);
2594 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2595 }
2596 } else {
2597 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2598 }
2599}
2600
2601static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
2602 const char __user *user_buf, size_t count, loff_t *ppos)
2603{
2604 struct iwl_lq_sta *lq_sta = file->private_data;
2605 struct iwl_priv *priv;
2606 char buf[64];
2607 int buf_size;
2608 u32 parsed_rate;
2609 struct iwl_station_priv *sta_priv =
2610 container_of(lq_sta, struct iwl_station_priv, lq_sta);
2611 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
2612
2613 priv = lq_sta->drv;
2614 memset(buf, 0, sizeof(buf));
2615 buf_size = min(count, sizeof(buf) - 1);
2616 if (copy_from_user(buf, user_buf, buf_size))
2617 return -EFAULT;
2618
2619 if (sscanf(buf, "%x", &parsed_rate) == 1)
2620 lq_sta->dbg_fixed_rate = parsed_rate;
2621 else
2622 lq_sta->dbg_fixed_rate = 0;
2623
2624 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2625 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2626 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2627
2628 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
2629 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2630
2631 if (lq_sta->dbg_fixed_rate) {
2632 iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2633 iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
2634 false);
2635 }
2636
2637 return count;
2638}
2639
2640static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
2641 char __user *user_buf, size_t count, loff_t *ppos)
2642{
2643 char *buff;
2644 int desc = 0;
2645 int i = 0;
2646 int index = 0;
2647 ssize_t ret;
2648
2649 struct iwl_lq_sta *lq_sta = file->private_data;
2650 struct iwl_priv *priv;
2651 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2652
2653 priv = lq_sta->drv;
2654 buff = kmalloc(1024, GFP_KERNEL);
2655 if (!buff)
2656 return -ENOMEM;
2657
2658 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2659 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
2660 lq_sta->total_failed, lq_sta->total_success,
2661 lq_sta->active_legacy_rate);
2662 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2663 lq_sta->dbg_fixed_rate);
2664 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
2665 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
2666 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
2667 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
2668 desc += sprintf(buff+desc, "lq type %s\n",
2669 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
2670 if (is_Ht(tbl->lq_type)) {
2671 desc += sprintf(buff+desc, " %s",
2672 (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
2673 desc += sprintf(buff+desc, " %s",
2674 (tbl->is_ht40) ? "40MHz" : "20MHz");
2675 desc += sprintf(buff+desc, " %s %s %s\n",
2676 (tbl->is_SGI) ? "SGI" : "",
2677 (lq_sta->is_green) ? "GF enabled" : "",
2678 (lq_sta->is_agg) ? "AGG on" : "");
2679 }
2680 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
2681 lq_sta->last_rate_n_flags);
2682 desc += sprintf(buff+desc, "general:"
2683 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
2684 lq_sta->lq.general_params.flags,
2685 lq_sta->lq.general_params.mimo_delimiter,
2686 lq_sta->lq.general_params.single_stream_ant_msk,
2687 lq_sta->lq.general_params.dual_stream_ant_msk);
2688
2689 desc += sprintf(buff+desc, "agg:"
2690 "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
2691 le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
2692 lq_sta->lq.agg_params.agg_dis_start_th,
2693 lq_sta->lq.agg_params.agg_frame_cnt_limit);
2694
2695 desc += sprintf(buff+desc,
2696 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
2697 lq_sta->lq.general_params.start_rate_index[0],
2698 lq_sta->lq.general_params.start_rate_index[1],
2699 lq_sta->lq.general_params.start_rate_index[2],
2700 lq_sta->lq.general_params.start_rate_index[3]);
2701
2702 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2703 index = iwl4965_hwrate_to_plcp_idx(
2704 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
2705 if (is_legacy(tbl->lq_type)) {
2706 desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
2707 i,
2708 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2709 iwl_rate_mcs[index].mbps);
2710 } else {
2711 desc += sprintf(buff+desc,
2712 " rate[%d] 0x%X %smbps (%s)\n",
2713 i,
2714 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2715 iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
2716 }
2717 }
2718
2719 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2720 kfree(buff);
2721 return ret;
2722}
2723
2724static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
2725 .write = iwl4965_rs_sta_dbgfs_scale_table_write,
2726 .read = iwl4965_rs_sta_dbgfs_scale_table_read,
2727 .open = iwl4965_open_file_generic,
2728 .llseek = default_llseek,
2729};
2730static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
2731 char __user *user_buf, size_t count, loff_t *ppos)
2732{
2733 char *buff;
2734 int desc = 0;
2735 int i, j;
2736 ssize_t ret;
2737
2738 struct iwl_lq_sta *lq_sta = file->private_data;
2739
2740 buff = kmalloc(1024, GFP_KERNEL);
2741 if (!buff)
2742 return -ENOMEM;
2743
2744 for (i = 0; i < LQ_SIZE; i++) {
2745 desc += sprintf(buff+desc,
2746 "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
2747 "rate=0x%X\n",
2748 lq_sta->active_tbl == i ? "*" : "x",
2749 lq_sta->lq_info[i].lq_type,
2750 lq_sta->lq_info[i].is_SGI,
2751 lq_sta->lq_info[i].is_ht40,
2752 lq_sta->lq_info[i].is_dup,
2753 lq_sta->is_green,
2754 lq_sta->lq_info[i].current_rate);
2755 for (j = 0; j < IWL_RATE_COUNT; j++) {
2756 desc += sprintf(buff+desc,
2757 "counter=%d success=%d %%=%d\n",
2758 lq_sta->lq_info[i].win[j].counter,
2759 lq_sta->lq_info[i].win[j].success_counter,
2760 lq_sta->lq_info[i].win[j].success_ratio);
2761 }
2762 }
2763 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2764 kfree(buff);
2765 return ret;
2766}
2767
2768static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2769 .read = iwl4965_rs_sta_dbgfs_stats_table_read,
2770 .open = iwl4965_open_file_generic,
2771 .llseek = default_llseek,
2772};
2773
2774static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2775 char __user *user_buf, size_t count, loff_t *ppos)
2776{
2777 char buff[120];
2778 int desc = 0;
2779 ssize_t ret;
2780
2781 struct iwl_lq_sta *lq_sta = file->private_data;
2782 struct iwl_priv *priv;
2783 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2784
2785 priv = lq_sta->drv;
2786
2787 if (is_Ht(tbl->lq_type))
2788 desc += sprintf(buff+desc,
2789 "Bit Rate= %d Mb/s\n",
2790 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2791 else
2792 desc += sprintf(buff+desc,
2793 "Bit Rate= %d Mb/s\n",
2794 iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
2795
2796 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2797 return ret;
2798}
2799
2800static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
2801 .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
2802 .open = iwl4965_open_file_generic,
2803 .llseek = default_llseek,
2804};
2805
2806static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
2807 struct dentry *dir)
2808{
2809 struct iwl_lq_sta *lq_sta = priv_sta;
2810 lq_sta->rs_sta_dbgfs_scale_table_file =
2811 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
2812 lq_sta, &rs_sta_dbgfs_scale_table_ops);
2813 lq_sta->rs_sta_dbgfs_stats_table_file =
2814 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
2815 lq_sta, &rs_sta_dbgfs_stats_table_ops);
2816 lq_sta->rs_sta_dbgfs_rate_scale_data_file =
2817 debugfs_create_file("rate_scale_data", S_IRUSR, dir,
2818 lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
2819 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
2820 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
2821 &lq_sta->tx_agg_tid_en);
2822
2823}
2824
2825static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
2826{
2827 struct iwl_lq_sta *lq_sta = priv_sta;
2828 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2829 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2830 debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
2831 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
2832}
2833#endif
2834
2835/*
2836 * Initialization of rate scaling information is done by the driver after
2837 * the station is added. Since mac80211 calls this function before a
2838 * station is added, we ignore it.
2839 */
2840static void
2841iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
2842 struct ieee80211_sta *sta, void *priv_sta)
2843{
2844}
2845static struct rate_control_ops rs_4965_ops = {
2846 .module = NULL,
2847 .name = IWL4965_RS_NAME,
2848 .tx_status = iwl4965_rs_tx_status,
2849 .get_rate = iwl4965_rs_get_rate,
2850 .rate_init = iwl4965_rs_rate_init_stub,
2851 .alloc = iwl4965_rs_alloc,
2852 .free = iwl4965_rs_free,
2853 .alloc_sta = iwl4965_rs_alloc_sta,
2854 .free_sta = iwl4965_rs_free_sta,
2855#ifdef CONFIG_MAC80211_DEBUGFS
2856 .add_sta_debugfs = iwl4965_rs_add_debugfs,
2857 .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
2858#endif
2859};
2860
2861int iwl4965_rate_control_register(void)
2862{
2863 pr_err("Registering 4965 rate control operations\n");
2864 return ieee80211_rate_control_register(&rs_4965_ops);
2865}
2866
2867void iwl4965_rate_control_unregister(void)
2868{
2869 ieee80211_rate_control_unregister(&rs_4965_ops);
2870}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
new file mode 100644
index 000000000000..b9fa2f6411a7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
@@ -0,0 +1,291 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-4965-calib.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41#include "iwl-4965-hw.h"
42#include "iwl-4965.h"
43
44void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
45 struct iwl_rx_mem_buffer *rxb)
46
47{
48 struct iwl_rx_packet *pkt = rxb_addr(rxb);
49 struct iwl_missed_beacon_notif *missed_beacon;
50
51 missed_beacon = &pkt->u.missed_beacon;
52 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
53 priv->missed_beacon_threshold) {
54 IWL_DEBUG_CALIB(priv,
55 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
56 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
57 le32_to_cpu(missed_beacon->total_missed_becons),
58 le32_to_cpu(missed_beacon->num_recvd_beacons),
59 le32_to_cpu(missed_beacon->num_expected_beacons));
60 if (!test_bit(STATUS_SCANNING, &priv->status))
61 iwl4965_init_sensitivity(priv);
62 }
63}
64
65/* Calculate the noise level, based on measurements during network silence
66 * just before the beacon arrives. This measurement can be done only if we
67 * know exactly when to expect beacons, i.e. only when we're associated. */
68static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
69{
70 struct statistics_rx_non_phy *rx_info;
71 int num_active_rx = 0;
72 int total_silence = 0;
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise;
75
76 rx_info = &(priv->_4965.statistics.rx.general);
77 bcn_silence_a =
78 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
79 bcn_silence_b =
80 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
81 bcn_silence_c =
82 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
83
84 if (bcn_silence_a) {
85 total_silence += bcn_silence_a;
86 num_active_rx++;
87 }
88 if (bcn_silence_b) {
89 total_silence += bcn_silence_b;
90 num_active_rx++;
91 }
92 if (bcn_silence_c) {
93 total_silence += bcn_silence_c;
94 num_active_rx++;
95 }
96
97 /* Average among active antennas */
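	/* Offsetting by 107 converts the averaged silence measurement into
	 * an approximate noise level in dBm (as logged below). */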
98 if (num_active_rx)
99 last_rx_noise = (total_silence / num_active_rx) - 107;
100 else
101 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
102
103 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
104 bcn_silence_a, bcn_silence_b, bcn_silence_c,
105 last_rx_noise);
106}
107
108#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
109/*
110 * Based on the assumption that all statistics counters are in DWORDs.
111 * FIXME: this function is for debugging only and does not handle
112 * the case of counter roll-over.
113 */
114static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
115 __le32 *stats)
116{
117 int i, size;
118 __le32 *prev_stats;
119 u32 *accum_stats;
120 u32 *delta, *max_delta;
121 struct statistics_general_common *general, *accum_general;
122 struct statistics_tx *tx, *accum_tx;
123
124 prev_stats = (__le32 *)&priv->_4965.statistics;
125 accum_stats = (u32 *)&priv->_4965.accum_statistics;
126 size = sizeof(struct iwl_notif_statistics);
127 general = &priv->_4965.statistics.general.common;
128 accum_general = &priv->_4965.accum_statistics.general.common;
129 tx = &priv->_4965.statistics.tx;
130 accum_tx = &priv->_4965.accum_statistics.tx;
131 delta = (u32 *)&priv->_4965.delta_statistics;
132 max_delta = (u32 *)&priv->_4965.max_delta;
133
134 for (i = sizeof(__le32); i < size;
135 i += sizeof(__le32), stats++, prev_stats++, delta++,
136 max_delta++, accum_stats++) {
137 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
138 *delta = (le32_to_cpu(*stats) -
139 le32_to_cpu(*prev_stats));
140 *accum_stats += *delta;
141 if (*delta > *max_delta)
142 *max_delta = *delta;
143 }
144 }
145
146 /* reset accumulative statistics for "no-counter" type statistics */
147 accum_general->temperature = general->temperature;
148 accum_general->ttl_timestamp = general->ttl_timestamp;
149}
150#endif
151
152#define REG_RECALIB_PERIOD (60)
153
154/**
155 * iwl4965_good_plcp_health - check for PLCP errors
156 *
157 * When the PLCP error rate exceeds the threshold, reset the radio
158 * to improve throughput.
159 */
160bool iwl4965_good_plcp_health(struct iwl_priv *priv,
161 struct iwl_rx_packet *pkt)
162{
163 bool rc = true;
164 int combined_plcp_delta;
165 unsigned int plcp_msec;
166 unsigned long plcp_received_jiffies;
167
168 if (priv->cfg->base_params->plcp_delta_threshold ==
169 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
170 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
171 return rc;
172 }
173
174 /*
175 * check for plcp_err and trigger radio reset if it exceeds
176 * the plcp error threshold plcp_delta.
177 */
178 plcp_received_jiffies = jiffies;
179 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
180 (long) priv->plcp_jiffies);
181 priv->plcp_jiffies = plcp_received_jiffies;
182 /*
183 * check to make sure plcp_msec is not 0 to prevent division
184 * by zero.
185 */
186 if (plcp_msec) {
187 struct statistics_rx_phy *ofdm;
188 struct statistics_rx_ht_phy *ofdm_ht;
189
190 ofdm = &pkt->u.stats.rx.ofdm;
191 ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
192 combined_plcp_delta =
193 (le32_to_cpu(ofdm->plcp_err) -
194 le32_to_cpu(priv->_4965.statistics.
195 rx.ofdm.plcp_err)) +
196 (le32_to_cpu(ofdm_ht->plcp_err) -
197 le32_to_cpu(priv->_4965.statistics.
198 rx.ofdm_ht.plcp_err));
199
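		/* Scale the error delta to errors per 100 ms before comparing
		 * it with the configured plcp_delta_threshold. */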
200 if ((combined_plcp_delta > 0) &&
201 ((combined_plcp_delta * 100) / plcp_msec) >
202 priv->cfg->base_params->plcp_delta_threshold) {
203 /*
204			 * if plcp_err exceeds the threshold,
205 * the following data is printed in csv format:
206 * Text: plcp_err exceeded %d,
207 * Received ofdm.plcp_err,
208 * Current ofdm.plcp_err,
209 * Received ofdm_ht.plcp_err,
210 * Current ofdm_ht.plcp_err,
211 * combined_plcp_delta,
212 * plcp_msec
213 */
214 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
215 "%u, %u, %u, %u, %d, %u mSecs\n",
216 priv->cfg->base_params->plcp_delta_threshold,
217 le32_to_cpu(ofdm->plcp_err),
218				le32_to_cpu(priv->_4965.statistics.rx.ofdm.plcp_err),
219 le32_to_cpu(ofdm_ht->plcp_err),
220				le32_to_cpu(priv->_4965.statistics.rx.ofdm_ht.plcp_err),
221 combined_plcp_delta, plcp_msec);
222
223 rc = false;
224 }
225 }
226 return rc;
227}
228
229void iwl4965_rx_statistics(struct iwl_priv *priv,
230 struct iwl_rx_mem_buffer *rxb)
231{
232 int change;
233 struct iwl_rx_packet *pkt = rxb_addr(rxb);
234
235 IWL_DEBUG_RX(priv,
236 "Statistics notification received (%d vs %d).\n",
237 (int)sizeof(struct iwl_notif_statistics),
238 le32_to_cpu(pkt->len_n_flags) &
239 FH_RSCSR_FRAME_SIZE_MSK);
240
241 change = ((priv->_4965.statistics.general.common.temperature !=
242 pkt->u.stats.general.common.temperature) ||
243 ((priv->_4965.statistics.flag &
244 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
245 (pkt->u.stats.flag &
246 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
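	/* A change in temperature or in the HT40-mode flag triggers the
	 * temperature callback at the end of this function. */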
247#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
248 iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
249#endif
250
251 iwl_legacy_recover_from_statistics(priv, pkt);
252
253 memcpy(&priv->_4965.statistics, &pkt->u.stats,
254 sizeof(priv->_4965.statistics));
255
256 set_bit(STATUS_STATISTICS, &priv->status);
257
258 /* Reschedule the statistics timer to occur in
259 * REG_RECALIB_PERIOD seconds to ensure we get a
260 * thermal update even if the uCode doesn't give
261 * us one */
262 mod_timer(&priv->statistics_periodic, jiffies +
263 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
264
265 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
266 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
267 iwl4965_rx_calc_noise(priv);
268 queue_work(priv->workqueue, &priv->run_time_calib_work);
269 }
270 if (priv->cfg->ops->lib->temp_ops.temperature && change)
271 priv->cfg->ops->lib->temp_ops.temperature(priv);
272}
273
274void iwl4965_reply_statistics(struct iwl_priv *priv,
275 struct iwl_rx_mem_buffer *rxb)
276{
277 struct iwl_rx_packet *pkt = rxb_addr(rxb);
278
279 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
280#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
281 memset(&priv->_4965.accum_statistics, 0,
282 sizeof(struct iwl_notif_statistics));
283 memset(&priv->_4965.delta_statistics, 0,
284 sizeof(struct iwl_notif_statistics));
285 memset(&priv->_4965.max_delta, 0,
286 sizeof(struct iwl_notif_statistics));
287#endif
288 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
289 }
290 iwl4965_rx_statistics(priv, rxb);
291}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
new file mode 100644
index 000000000000..a262c23553d2
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
@@ -0,0 +1,721 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-4965.h"
36
37static struct iwl_link_quality_cmd *
38iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
39{
40 int i, r;
41 struct iwl_link_quality_cmd *link_cmd;
42 u32 rate_flags = 0;
43 __le32 rate_n_flags;
44
45 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
46 if (!link_cmd) {
47 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
48 return NULL;
49 }
50 /* Set up the rate scaling to start at selected rate, fall back
51 * all the way down to 1M in IEEE order, and then spin on 1M */
52 if (priv->band == IEEE80211_BAND_5GHZ)
53 r = IWL_RATE_6M_INDEX;
54 else
55 r = IWL_RATE_1M_INDEX;
56
57 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
58 rate_flags |= RATE_MCS_CCK_MSK;
59
60 rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
61 RATE_MCS_ANT_POS;
62 rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
63 rate_flags);
64 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
65 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
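	/* Every retry entry starts at this same low rate; the rate scaling
	 * algorithm installs a real fallback table later. */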
66
67 link_cmd->general_params.single_stream_ant_msk =
68 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
69
70 link_cmd->general_params.dual_stream_ant_msk =
71 priv->hw_params.valid_tx_ant &
72 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
73 if (!link_cmd->general_params.dual_stream_ant_msk) {
74 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
75 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
76 link_cmd->general_params.dual_stream_ant_msk =
77 priv->hw_params.valid_tx_ant;
78 }
79
80 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
81 link_cmd->agg_params.agg_time_limit =
82 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
83
84 link_cmd->sta_id = sta_id;
85
86 return link_cmd;
87}
88
89/*
90 * iwl4965_add_bssid_station - Add the special IBSS BSSID station
91 *
92 * Function sleeps.
93 */
94int
95iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
96 const u8 *addr, u8 *sta_id_r)
97{
98 int ret;
99 u8 sta_id;
100 struct iwl_link_quality_cmd *link_cmd;
101 unsigned long flags;
102
103 if (sta_id_r)
104 *sta_id_r = IWL_INVALID_STATION;
105
106 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
107 if (ret) {
108 IWL_ERR(priv, "Unable to add station %pM\n", addr);
109 return ret;
110 }
111
112 if (sta_id_r)
113 *sta_id_r = sta_id;
114
115 spin_lock_irqsave(&priv->sta_lock, flags);
116 priv->stations[sta_id].used |= IWL_STA_LOCAL;
117 spin_unlock_irqrestore(&priv->sta_lock, flags);
118
119 /* Set up default rate scaling table in device's station table */
120 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
121 if (!link_cmd) {
122 IWL_ERR(priv,
123 "Unable to initialize rate scaling for station %pM.\n",
124 addr);
125 return -ENOMEM;
126 }
127
128 ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
129 if (ret)
130 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
131
132 spin_lock_irqsave(&priv->sta_lock, flags);
133 priv->stations[sta_id].lq = link_cmd;
134 spin_unlock_irqrestore(&priv->sta_lock, flags);
135
136 return 0;
137}
138
139static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
140 struct iwl_rxon_context *ctx,
141 bool send_if_empty)
142{
143 int i, not_empty = 0;
144 u8 buff[sizeof(struct iwl_wep_cmd) +
145 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
146 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
147 size_t cmd_size = sizeof(struct iwl_wep_cmd);
148 struct iwl_host_cmd cmd = {
149 .id = ctx->wep_key_cmd,
150 .data = wep_cmd,
151 .flags = CMD_SYNC,
152 };
153
154 might_sleep();
155
156 memset(wep_cmd, 0, cmd_size +
157 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
158
159 for (i = 0; i < WEP_KEYS_MAX ; i++) {
160 wep_cmd->key[i].key_index = i;
161 if (ctx->wep_keys[i].key_size) {
162 wep_cmd->key[i].key_offset = i;
163 not_empty = 1;
164 } else {
165 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
166 }
167
168 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
169 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
170 ctx->wep_keys[i].key_size);
171 }
172
173 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
174 wep_cmd->num_keys = WEP_KEYS_MAX;
175
176 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
177
178 cmd.len = cmd_size;
179
180 if (not_empty || send_if_empty)
181 return iwl_legacy_send_cmd(priv, &cmd);
182 else
183 return 0;
184}
185
186int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
187 struct iwl_rxon_context *ctx)
188{
189 lockdep_assert_held(&priv->mutex);
190
191 return iwl4965_static_wepkey_cmd(priv, ctx, false);
192}
193
194int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx,
196 struct ieee80211_key_conf *keyconf)
197{
198 int ret;
199
200 lockdep_assert_held(&priv->mutex);
201
202 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
203 keyconf->keyidx);
204
205 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
206 if (iwl_legacy_is_rfkill(priv)) {
207 IWL_DEBUG_WEP(priv,
208 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
209 /* but keys in device are clear anyway so return success */
210 return 0;
211 }
212 ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
213 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
214 keyconf->keyidx, ret);
215
216 return ret;
217}
218
219int iwl4965_set_default_wep_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf)
222{
223 int ret;
224
225 lockdep_assert_held(&priv->mutex);
226
227 if (keyconf->keylen != WEP_KEY_LEN_128 &&
228 keyconf->keylen != WEP_KEY_LEN_64) {
229 IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
230 return -EINVAL;
231 }
232
233 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
234 keyconf->hw_key_idx = HW_KEY_DEFAULT;
235 priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
236
237 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
238 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
239 keyconf->keylen);
240
241 ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
242 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
243 keyconf->keylen, keyconf->keyidx, ret);
244
245 return ret;
246}
247
248static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
249 struct iwl_rxon_context *ctx,
250 struct ieee80211_key_conf *keyconf,
251 u8 sta_id)
252{
253 unsigned long flags;
254 __le16 key_flags = 0;
255 struct iwl_legacy_addsta_cmd sta_cmd;
256
257 lockdep_assert_held(&priv->mutex);
258
259 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
260
261 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
262 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
263 key_flags &= ~STA_KEY_FLG_INVALID;
264
265 if (keyconf->keylen == WEP_KEY_LEN_128)
266 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
267
268 if (sta_id == ctx->bcast_sta_id)
269 key_flags |= STA_KEY_MULTICAST_MSK;
270
271 spin_lock_irqsave(&priv->sta_lock, flags);
272
273 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
274 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
275 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
276
277 memcpy(priv->stations[sta_id].keyinfo.key,
278 keyconf->key, keyconf->keylen);
279
280 memcpy(&priv->stations[sta_id].sta.key.key[3],
281 keyconf->key, keyconf->keylen);
282
283 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
284 == STA_KEY_FLG_NO_ENC)
285 priv->stations[sta_id].sta.key.key_offset =
286 iwl_legacy_get_free_ucode_key_index(priv);
287	/* else, we are overriding an existing key => no need to allocate room
288 * in uCode. */
289
290 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
291 "no space for a new key");
292
293 priv->stations[sta_id].sta.key.key_flags = key_flags;
294 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
295 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
296
297 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
298 sizeof(struct iwl_legacy_addsta_cmd));
299 spin_unlock_irqrestore(&priv->sta_lock, flags);
300
301 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
302}
303
304static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
305 struct iwl_rxon_context *ctx,
306 struct ieee80211_key_conf *keyconf,
307 u8 sta_id)
308{
309 unsigned long flags;
310 __le16 key_flags = 0;
311 struct iwl_legacy_addsta_cmd sta_cmd;
312
313 lockdep_assert_held(&priv->mutex);
314
315 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
316 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
317 key_flags &= ~STA_KEY_FLG_INVALID;
318
319 if (sta_id == ctx->bcast_sta_id)
320 key_flags |= STA_KEY_MULTICAST_MSK;
321
322 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
323
324 spin_lock_irqsave(&priv->sta_lock, flags);
325 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
326 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
327
328 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
329 keyconf->keylen);
330
331 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
332 keyconf->keylen);
333
334 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
335 == STA_KEY_FLG_NO_ENC)
336 priv->stations[sta_id].sta.key.key_offset =
337 iwl_legacy_get_free_ucode_key_index(priv);
338	/* else, we are overriding an existing key => no need to allocate room
339 * in uCode. */
340
341 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
342 "no space for a new key");
343
344 priv->stations[sta_id].sta.key.key_flags = key_flags;
345 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
346 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
347
348 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
349 sizeof(struct iwl_legacy_addsta_cmd));
350 spin_unlock_irqrestore(&priv->sta_lock, flags);
351
352 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
353}
354
355static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
356 struct iwl_rxon_context *ctx,
357 struct ieee80211_key_conf *keyconf,
358 u8 sta_id)
359{
360 unsigned long flags;
361 int ret = 0;
362 __le16 key_flags = 0;
363
364 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
365 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
366 key_flags &= ~STA_KEY_FLG_INVALID;
367
368 if (sta_id == ctx->bcast_sta_id)
369 key_flags |= STA_KEY_MULTICAST_MSK;
370
371 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
372 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
373
374 spin_lock_irqsave(&priv->sta_lock, flags);
375
376 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
377 priv->stations[sta_id].keyinfo.keylen = 16;
378
379 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
380 == STA_KEY_FLG_NO_ENC)
381 priv->stations[sta_id].sta.key.key_offset =
382 iwl_legacy_get_free_ucode_key_index(priv);
383	/* else, we are overriding an existing key => no need to allocate room
384 * in uCode. */
385
386 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
387 "no space for a new key");
388
389 priv->stations[sta_id].sta.key.key_flags = key_flags;
390
391
392	/* This copy is actually not needed: we get the key with each TX */
393 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
394
395 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
396
397 spin_unlock_irqrestore(&priv->sta_lock, flags);
398
399 return ret;
400}
401
402void iwl4965_update_tkip_key(struct iwl_priv *priv,
403 struct iwl_rxon_context *ctx,
404 struct ieee80211_key_conf *keyconf,
405 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
406{
407 u8 sta_id;
408 unsigned long flags;
409 int i;
410
411 if (iwl_legacy_scan_cancel(priv)) {
412		/* scan cancel failed; just live with the bad key and rely
413 briefly on SW decryption */
414 return;
415 }
416
417 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
418 if (sta_id == IWL_INVALID_STATION)
419 return;
420
421 spin_lock_irqsave(&priv->sta_lock, flags);
422
423 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
424
425 for (i = 0; i < 5; i++)
426 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
427 cpu_to_le16(phase1key[i]);
428
429 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
430 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
431
432 iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
433
434 spin_unlock_irqrestore(&priv->sta_lock, flags);
435
436}
437
438int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
439 struct iwl_rxon_context *ctx,
440 struct ieee80211_key_conf *keyconf,
441 u8 sta_id)
442{
443 unsigned long flags;
444 u16 key_flags;
445 u8 keyidx;
446 struct iwl_legacy_addsta_cmd sta_cmd;
447
448 lockdep_assert_held(&priv->mutex);
449
450 ctx->key_mapping_keys--;
451
452 spin_lock_irqsave(&priv->sta_lock, flags);
453 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
454 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
455
456 IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
457 keyconf->keyidx, sta_id);
458
459 if (keyconf->keyidx != keyidx) {
460		/* We need to remove a key whose index differs from the one
461 * in the uCode. This means that the key we need to remove has
462 * been replaced by another one with different index.
463 * Don't do anything and return ok
464 */
465 spin_unlock_irqrestore(&priv->sta_lock, flags);
466 return 0;
467 }
468
469 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
470 IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
471 keyconf->keyidx, key_flags);
472 spin_unlock_irqrestore(&priv->sta_lock, flags);
473 return 0;
474 }
475
476 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
477 &priv->ucode_key_table))
478 IWL_ERR(priv, "index %d not used in uCode key table.\n",
479 priv->stations[sta_id].sta.key.key_offset);
480 memset(&priv->stations[sta_id].keyinfo, 0,
481 sizeof(struct iwl_hw_key));
482 memset(&priv->stations[sta_id].sta.key, 0,
483 sizeof(struct iwl4965_keyinfo));
484 priv->stations[sta_id].sta.key.key_flags =
485 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
486 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
487 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
488 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
489
490 if (iwl_legacy_is_rfkill(priv)) {
491 IWL_DEBUG_WEP(priv,
492 "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
493 spin_unlock_irqrestore(&priv->sta_lock, flags);
494 return 0;
495 }
496 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
497 sizeof(struct iwl_legacy_addsta_cmd));
498 spin_unlock_irqrestore(&priv->sta_lock, flags);
499
500 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
501}
502
503int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
504 struct ieee80211_key_conf *keyconf, u8 sta_id)
505{
506 int ret;
507
508 lockdep_assert_held(&priv->mutex);
509
510 ctx->key_mapping_keys++;
511 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
512
513 switch (keyconf->cipher) {
514 case WLAN_CIPHER_SUITE_CCMP:
515 ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
516 keyconf, sta_id);
517 break;
518 case WLAN_CIPHER_SUITE_TKIP:
519 ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
520 keyconf, sta_id);
521 break;
522 case WLAN_CIPHER_SUITE_WEP40:
523 case WLAN_CIPHER_SUITE_WEP104:
524 ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
525 keyconf, sta_id);
526 break;
527 default:
528 IWL_ERR(priv,
529 "Unknown alg: %s cipher = %x\n", __func__,
530 keyconf->cipher);
531 ret = -EINVAL;
532 }
533
534 IWL_DEBUG_WEP(priv,
535 "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
536 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
537 sta_id, ret);
538
539 return ret;
540}
541
542/**
543 * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
544 *
545 * This adds the broadcast station into the driver's station table
546 * and marks it driver active, so that it will be restored to the
547 * device at the next best time.
548 */
549int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
550 struct iwl_rxon_context *ctx)
551{
552 struct iwl_link_quality_cmd *link_cmd;
553 unsigned long flags;
554 u8 sta_id;
555
556 spin_lock_irqsave(&priv->sta_lock, flags);
557 sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
558 false, NULL);
559 if (sta_id == IWL_INVALID_STATION) {
560 IWL_ERR(priv, "Unable to prepare broadcast station\n");
561 spin_unlock_irqrestore(&priv->sta_lock, flags);
562
563 return -EINVAL;
564 }
565
566 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
567 priv->stations[sta_id].used |= IWL_STA_BCAST;
568 spin_unlock_irqrestore(&priv->sta_lock, flags);
569
570 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
571 if (!link_cmd) {
572 IWL_ERR(priv,
573 "Unable to initialize rate scaling for bcast station.\n");
574 return -ENOMEM;
575 }
576
577 spin_lock_irqsave(&priv->sta_lock, flags);
578 priv->stations[sta_id].lq = link_cmd;
579 spin_unlock_irqrestore(&priv->sta_lock, flags);
580
581 return 0;
582}
583
584/**
585 * iwl4965_update_bcast_station - update broadcast station's LQ command
586 *
587 * Only used by iwl4965. Placed here to have all bcast station management
588 * code together.
589 */
590static int iwl4965_update_bcast_station(struct iwl_priv *priv,
591 struct iwl_rxon_context *ctx)
592{
593 unsigned long flags;
594 struct iwl_link_quality_cmd *link_cmd;
595 u8 sta_id = ctx->bcast_sta_id;
596
597 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
598 if (!link_cmd) {
599 IWL_ERR(priv,
600 "Unable to initialize rate scaling for bcast station.\n");
601 return -ENOMEM;
602 }
603
604 spin_lock_irqsave(&priv->sta_lock, flags);
605 if (priv->stations[sta_id].lq)
606 kfree(priv->stations[sta_id].lq);
607 else
608 IWL_DEBUG_INFO(priv,
609 "Bcast station rate scaling has not been initialized yet.\n");
610 priv->stations[sta_id].lq = link_cmd;
611 spin_unlock_irqrestore(&priv->sta_lock, flags);
612
613 return 0;
614}
615
616int iwl4965_update_bcast_stations(struct iwl_priv *priv)
617{
618 struct iwl_rxon_context *ctx;
619 int ret = 0;
620
621 for_each_context(priv, ctx) {
622 ret = iwl4965_update_bcast_station(priv, ctx);
623 if (ret)
624 break;
625 }
626
627 return ret;
628}
629
630/**
631 * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
632 */
633int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
634{
635 unsigned long flags;
636 struct iwl_legacy_addsta_cmd sta_cmd;
637
638 lockdep_assert_held(&priv->mutex);
639
640 /* Remove "disable" flag, to enable Tx for this TID */
641 spin_lock_irqsave(&priv->sta_lock, flags);
642 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
643 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
644 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
645 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
646 sizeof(struct iwl_legacy_addsta_cmd));
647 spin_unlock_irqrestore(&priv->sta_lock, flags);
648
649 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
650}
651
652int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
653 int tid, u16 ssn)
654{
655 unsigned long flags;
656 int sta_id;
657 struct iwl_legacy_addsta_cmd sta_cmd;
658
659 lockdep_assert_held(&priv->mutex);
660
661 sta_id = iwl_legacy_sta_id(sta);
662 if (sta_id == IWL_INVALID_STATION)
663 return -ENXIO;
664
665 spin_lock_irqsave(&priv->sta_lock, flags);
666 priv->stations[sta_id].sta.station_flags_msk = 0;
667 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
668 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
669 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
670 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
671 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
672 sizeof(struct iwl_legacy_addsta_cmd));
673 spin_unlock_irqrestore(&priv->sta_lock, flags);
674
675 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
676}
677
678int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
679 int tid)
680{
681 unsigned long flags;
682 int sta_id;
683 struct iwl_legacy_addsta_cmd sta_cmd;
684
685 lockdep_assert_held(&priv->mutex);
686
687 sta_id = iwl_legacy_sta_id(sta);
688 if (sta_id == IWL_INVALID_STATION) {
689 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
690 return -ENXIO;
691 }
692
693 spin_lock_irqsave(&priv->sta_lock, flags);
694 priv->stations[sta_id].sta.station_flags_msk = 0;
695 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
696 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
697 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
698 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
699 sizeof(struct iwl_legacy_addsta_cmd));
700 spin_unlock_irqrestore(&priv->sta_lock, flags);
701
702 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
703}
704
705void
706iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
707{
708 unsigned long flags;
709
710 spin_lock_irqsave(&priv->sta_lock, flags);
711 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
712 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
713 priv->stations[sta_id].sta.sta.modify_mask =
714 STA_MODIFY_SLEEP_TX_COUNT_MSK;
715 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
716 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
717 iwl_legacy_send_add_sta(priv,
718 &priv->stations[sta_id].sta, CMD_ASYNC);
719 spin_unlock_irqrestore(&priv->sta_lock, flags);
720
721}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
new file mode 100644
index 000000000000..5c40502f869a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -0,0 +1,1369 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-4965-hw.h"
41#include "iwl-4965.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
48 * Mac80211 uses the following numbers, which we get from it
49 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
69static const u8 tid_to_ac[] = {
70 IEEE80211_AC_BE,
71 IEEE80211_AC_BK,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BE,
74 IEEE80211_AC_VI,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VO,
77 IEEE80211_AC_VO
78};
79
80static inline int iwl4965_get_ac_from_tid(u16 tid)
81{
82 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
83 return tid_to_ac[tid];
84
85 /* no support for TIDs 8-15 yet */
86 return -EINVAL;
87}
88
89static inline int
90iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
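/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how the two helpers above combine when picking an AC and FIFO for a QoS
 * TID. "ctx" and "tid" are assumed to come from the caller, as they do in
 * iwl4965_tx_agg_start() further below; the hw queue is then the AC itself,
 * since the AC->hw queue mapping is the identity mapping described above.
 *
 *	int ac   = iwl4965_get_ac_from_tid(tid);	// 0..3, or -EINVAL for TIDs 8-15
 *	int fifo = iwl4965_get_fifo_from_tid(ctx, tid);	// via ctx->ac_to_fifo[]
 *	if (ac < 0 || fifo < 0)
 *		return -EINVAL;
 */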
98
99/*
100 * Build the basic fields of the REPLY_TX command.
101 */
102static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
103 struct sk_buff *skb,
104 struct iwl_tx_cmd *tx_cmd,
105 struct ieee80211_tx_info *info,
106 struct ieee80211_hdr *hdr,
107 u8 std_id)
108{
109 __le16 fc = hdr->frame_control;
110 __le32 tx_flags = tx_cmd->tx_flags;
111
112 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
113 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
114 tx_flags |= TX_CMD_FLG_ACK_MSK;
115 if (ieee80211_is_mgmt(fc))
116 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
117 if (ieee80211_is_probe_resp(fc) &&
118 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
119 tx_flags |= TX_CMD_FLG_TSF_MSK;
120 } else {
121 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
122 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
123 }
124
125 if (ieee80211_is_back_req(fc))
126 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
127
128 tx_cmd->sta_id = std_id;
129 if (ieee80211_has_morefrags(fc))
130 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
131
132 if (ieee80211_is_data_qos(fc)) {
133 u8 *qc = ieee80211_get_qos_ctl(hdr);
134 tx_cmd->tid_tspec = qc[0] & 0xf;
135 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
136 } else {
137 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
138 }
139
140 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
141
142 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
143 if (ieee80211_is_mgmt(fc)) {
144 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
145 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
146 else
147 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
148 } else {
149 tx_cmd->timeout.pm_frame_timeout = 0;
150 }
151
152 tx_cmd->driver_txop = 0;
153 tx_cmd->tx_flags = tx_flags;
154 tx_cmd->next_frame_len = 0;
155}
156
157#define RTS_DFAULT_RETRY_LIMIT 60
158
159static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
160 struct iwl_tx_cmd *tx_cmd,
161 struct ieee80211_tx_info *info,
162 __le16 fc)
163{
164 u32 rate_flags;
165 int rate_idx;
166 u8 rts_retry_limit;
167 u8 data_retry_limit;
168 u8 rate_plcp;
169
170 /* Set retry limit on DATA packets and Probe Responses*/
171 if (ieee80211_is_probe_resp(fc))
172 data_retry_limit = 3;
173 else
174 data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
175 tx_cmd->data_retry_limit = data_retry_limit;
176
177 /* Set retry limit on RTS packets */
178 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
179 if (data_retry_limit < rts_retry_limit)
180 rts_retry_limit = data_retry_limit;
181 tx_cmd->rts_retry_limit = rts_retry_limit;
182
183 /* DATA packets will use the uCode station table for rate/antenna
184 * selection */
185 if (ieee80211_is_data(fc)) {
186 tx_cmd->initial_rate_index = 0;
187 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
188 return;
189 }
190
191	/*
192 * If the current TX rate stored in mac80211 has the MCS bit set, it's
193 * not really a TX rate. Thus, we use the lowest supported rate for
194 * this band. Also use the lowest supported rate if the stored rate
195 * index is invalid.
196 */
197 rate_idx = info->control.rates[0].idx;
198 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
199 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
200 rate_idx = rate_lowest_index(&priv->bands[info->band],
201 info->control.sta);
202 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
203 if (info->band == IEEE80211_BAND_5GHZ)
204 rate_idx += IWL_FIRST_OFDM_RATE;
205 /* Get PLCP rate for tx_cmd->rate_n_flags */
206 rate_plcp = iwlegacy_rates[rate_idx].plcp;
207 /* Zero out flags for this packet */
208 rate_flags = 0;
209
210 /* Set CCK flag as needed */
211 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
212 rate_flags |= RATE_MCS_CCK_MSK;
213
214 /* Set up antennas */
215 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
216 priv->hw_params.valid_tx_ant);
217
218 rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
219
220 /* Set the rate in the TX cmd */
221 tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
222}
223
224static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
225 struct ieee80211_tx_info *info,
226 struct iwl_tx_cmd *tx_cmd,
227 struct sk_buff *skb_frag,
228 int sta_id)
229{
230 struct ieee80211_key_conf *keyconf = info->control.hw_key;
231
232 switch (keyconf->cipher) {
233 case WLAN_CIPHER_SUITE_CCMP:
234 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
235 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
236 if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
238 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
239 break;
240
241 case WLAN_CIPHER_SUITE_TKIP:
242 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
243 ieee80211_get_tkip_key(keyconf, skb_frag,
244 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
245 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
246 break;
247
248 case WLAN_CIPHER_SUITE_WEP104:
249 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
250 /* fall through */
251 case WLAN_CIPHER_SUITE_WEP40:
252 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
253 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
254
255 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
256
257 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
258 "with key %d\n", keyconf->keyidx);
259 break;
260
261 default:
262 IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
263 break;
264 }
265}
266
267/*
268 * start REPLY_TX command process
269 */
270int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
271{
272 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
273 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
274 struct ieee80211_sta *sta = info->control.sta;
275 struct iwl_station_priv *sta_priv = NULL;
276 struct iwl_tx_queue *txq;
277 struct iwl_queue *q;
278 struct iwl_device_cmd *out_cmd;
279 struct iwl_cmd_meta *out_meta;
280 struct iwl_tx_cmd *tx_cmd;
281 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
282 int txq_id;
283 dma_addr_t phys_addr;
284 dma_addr_t txcmd_phys;
285 dma_addr_t scratch_phys;
286 u16 len, firstlen, secondlen;
287 u16 seq_number = 0;
288 __le16 fc;
289 u8 hdr_len;
290 u8 sta_id;
291 u8 wait_write_ptr = 0;
292 u8 tid = 0;
293 u8 *qc = NULL;
294 unsigned long flags;
295 bool is_agg = false;
296
297 if (info->control.vif)
298 ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
299
300 spin_lock_irqsave(&priv->lock, flags);
301 if (iwl_legacy_is_rfkill(priv)) {
302 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
303 goto drop_unlock;
304 }
305
306 fc = hdr->frame_control;
307
308#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
309 if (ieee80211_is_auth(fc))
310 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
311 else if (ieee80211_is_assoc_req(fc))
312 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
313 else if (ieee80211_is_reassoc_req(fc))
314 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
315#endif
316
317 hdr_len = ieee80211_hdrlen(fc);
318
319 /* Find index into station table for destination station */
320 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
321 if (sta_id == IWL_INVALID_STATION) {
322 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
323 hdr->addr1);
324 goto drop_unlock;
325 }
326
327 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
328
329 if (sta)
330 sta_priv = (void *)sta->drv_priv;
331
332 if (sta_priv && sta_priv->asleep &&
333 (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
334 /*
335 * This sends an asynchronous command to the device,
336 * but we can rely on it being processed before the
337 * next frame is processed -- and the next frame to
338 * this station is the one that will consume this
339 * counter.
340 * For now set the counter to just 1 since we do not
341 * support uAPSD yet.
342 */
343 iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
344 }
345
346 /*
347 * Send this frame after DTIM -- there's a special queue
348 * reserved for this for contexts that support AP mode.
349 */
350 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
351 txq_id = ctx->mcast_queue;
352 /*
353 * The microcode will clear the more data
354 * bit in the last frame it transmits.
355 */
356 hdr->frame_control |=
357 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
358 } else
359 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
360
361 /* irqs already disabled/saved above when locking priv->lock */
362 spin_lock(&priv->sta_lock);
363
364 if (ieee80211_is_data_qos(fc)) {
365 qc = ieee80211_get_qos_ctl(hdr);
366 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
367 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
368 spin_unlock(&priv->sta_lock);
369 goto drop_unlock;
370 }
371 seq_number = priv->stations[sta_id].tid[tid].seq_number;
372 seq_number &= IEEE80211_SCTL_SEQ;
373 hdr->seq_ctrl = hdr->seq_ctrl &
374 cpu_to_le16(IEEE80211_SCTL_FRAG);
375 hdr->seq_ctrl |= cpu_to_le16(seq_number);
376 seq_number += 0x10;
377 /* aggregation is on for this <sta,tid> */
378 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
379 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
380 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
381 is_agg = true;
382 }
383 }
384
385 txq = &priv->txq[txq_id];
386 q = &txq->q;
387
388 if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
389 spin_unlock(&priv->sta_lock);
390 goto drop_unlock;
391 }
392
393 if (ieee80211_is_data_qos(fc)) {
394 priv->stations[sta_id].tid[tid].tfds_in_queue++;
395 if (!ieee80211_has_morefrags(fc))
396 priv->stations[sta_id].tid[tid].seq_number = seq_number;
397 }
398
399 spin_unlock(&priv->sta_lock);
400
401 /* Set up driver data for this TFD */
402 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
403 txq->txb[q->write_ptr].skb = skb;
404 txq->txb[q->write_ptr].ctx = ctx;
405
406 /* Set up first empty entry in queue's array of Tx/cmd buffers */
407 out_cmd = txq->cmd[q->write_ptr];
408 out_meta = &txq->meta[q->write_ptr];
409 tx_cmd = &out_cmd->cmd.tx;
410 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
411 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
412
413 /*
414 * Set up the Tx-command (not MAC!) header.
415 * Store the chosen Tx queue and TFD index within the sequence field;
416 * after Tx, uCode's Tx response will return this value so driver can
417 * locate the frame within the tx queue and do post-tx processing.
418 */
419 out_cmd->hdr.cmd = REPLY_TX;
420 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
421 INDEX_TO_SEQ(q->write_ptr)));
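	/*
	 * Editorial sketch (an assumption, not part of the original file):
	 * with the usual QUEUE_TO_SEQ()/INDEX_TO_SEQ() style packing, queue
	 * number and TFD index share the 16-bit sequence value, roughly:
	 *
	 *	seq = (txq_id << 8) | (q->write_ptr & 0xff);
	 *
	 * so the Tx response handler can later recover both halves and locate
	 * this frame in the queue for post-tx processing, as the comment
	 * above describes.
	 */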
422
423 /* Copy MAC header from skb into command buffer */
424 memcpy(tx_cmd->hdr, hdr, hdr_len);
425
426
427 /* Total # bytes to be transmitted */
428 len = (u16)skb->len;
429 tx_cmd->len = cpu_to_le16(len);
430
431 if (info->control.hw_key)
432 iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
433
434 /* TODO need this for burst mode later on */
435 iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
436 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
437
438 iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
439
440 iwl_legacy_update_stats(priv, true, fc, len);
441 /*
442 * Use the first empty entry in this queue's command buffer array
443 * to contain the Tx command and MAC header concatenated together
444 * (payload data will be in another buffer).
445 * Size of this varies, due to varying MAC header length.
446 * If end is not dword aligned, we'll have 2 extra bytes at the end
447 * of the MAC header (device reads on dword boundaries).
448 * We'll tell device about this padding later.
449 */
450 len = sizeof(struct iwl_tx_cmd) +
451 sizeof(struct iwl_cmd_header) + hdr_len;
452 firstlen = (len + 3) & ~3;
453
454 /* Tell NIC about any 2-byte padding after MAC header */
455 if (firstlen != len)
456 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
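	/*
	 * Worked example (editorial note, not part of the original file):
	 * if len happens to be 54, then firstlen = (54 + 3) & ~3 = 56, i.e.
	 * the buffer is rounded up to the next dword boundary, leaving two
	 * padding bytes after the MAC header that the device is told about
	 * via TX_CMD_FLG_MH_PAD_MSK above.
	 */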
457
458 /* Physical address of this Tx command's header (not MAC header!),
459 * within command buffer array. */
460 txcmd_phys = pci_map_single(priv->pci_dev,
461 &out_cmd->hdr, firstlen,
462 PCI_DMA_BIDIRECTIONAL);
463 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
464 dma_unmap_len_set(out_meta, len, firstlen);
465 /* Add buffer containing Tx command and MAC(!) header to TFD's
466 * first entry */
467 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
468 txcmd_phys, firstlen, 1, 0);
469
470 if (!ieee80211_has_morefrags(hdr->frame_control)) {
471 txq->need_update = 1;
472 } else {
473 wait_write_ptr = 1;
474 txq->need_update = 0;
475 }
476
477 /* Set up TFD's 2nd entry to point directly to remainder of skb,
478 * if any (802.11 null frames have no payload). */
479 secondlen = skb->len - hdr_len;
480 if (secondlen > 0) {
481 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
482 secondlen, PCI_DMA_TODEVICE);
483 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
484 phys_addr, secondlen,
485 0, 0);
486 }
487
488 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
489 offsetof(struct iwl_tx_cmd, scratch);
490
491 /* take back ownership of DMA buffer to enable update */
492 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
493 firstlen, PCI_DMA_BIDIRECTIONAL);
494 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
495 tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
496
497 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
498 le16_to_cpu(out_cmd->hdr.sequence));
499 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
500 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
501 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
502
503 /* Set up entry for this TFD in Tx byte-count array */
504 if (info->flags & IEEE80211_TX_CTL_AMPDU)
505 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
506 le16_to_cpu(tx_cmd->len));
507
508 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
509 firstlen, PCI_DMA_BIDIRECTIONAL);
510
511 trace_iwlwifi_legacy_dev_tx(priv,
512 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
513 sizeof(struct iwl_tfd),
514 &out_cmd->hdr, firstlen,
515 skb->data + hdr_len, secondlen);
516
517 /* Tell device the write index *just past* this latest filled TFD */
518 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
519 iwl_legacy_txq_update_write_ptr(priv, txq);
520 spin_unlock_irqrestore(&priv->lock, flags);
521
522 /*
523 * At this point the frame is "transmitted" successfully
524 * and we will get a TX status notification eventually,
525 * regardless of the value of ret. "ret" only indicates
526 * whether or not we should update the write pointer.
527 */
528
529 /*
530 * Avoid atomic ops if it isn't an associated client.
531 * Also, if this is a packet for aggregation, don't
532 * increase the counter because the ucode will stop
533 * aggregation queues when their respective station
534 * goes to sleep.
535 */
536 if (sta_priv && sta_priv->client && !is_agg)
537 atomic_inc(&sta_priv->pending_frames);
538
539 if ((iwl_legacy_queue_space(q) < q->high_mark) &&
540 priv->mac80211_registered) {
541 if (wait_write_ptr) {
542 spin_lock_irqsave(&priv->lock, flags);
543 txq->need_update = 1;
544 iwl_legacy_txq_update_write_ptr(priv, txq);
545 spin_unlock_irqrestore(&priv->lock, flags);
546 } else {
547 iwl_legacy_stop_queue(priv, txq);
548 }
549 }
550
551 return 0;
552
553drop_unlock:
554 spin_unlock_irqrestore(&priv->lock, flags);
555 return -1;
556}
557
558static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
559 struct iwl_dma_ptr *ptr, size_t size)
560{
561 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
562 GFP_KERNEL);
563 if (!ptr->addr)
564 return -ENOMEM;
565 ptr->size = size;
566 return 0;
567}
568
569static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
570 struct iwl_dma_ptr *ptr)
571{
572 if (unlikely(!ptr->addr))
573 return;
574
575 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
576 memset(ptr, 0, sizeof(*ptr));
577}
578
579/**
580 * iwl4965_hw_txq_ctx_free - Free TXQ Context
581 *
582 * Destroy all TX DMA queues and structures
583 */
584void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
585{
586 int txq_id;
587
588 /* Tx queues */
589 if (priv->txq) {
590 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
591 if (txq_id == priv->cmd_queue)
592 iwl_legacy_cmd_queue_free(priv);
593 else
594 iwl_legacy_tx_queue_free(priv, txq_id);
595 }
596 iwl4965_free_dma_ptr(priv, &priv->kw);
597
598 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
599
600 /* free tx queue structure */
601 iwl_legacy_txq_mem(priv);
602}
603
604/**
605 * iwl4965_txq_ctx_alloc - allocate TX queue context
606 * Allocate all Tx DMA structures and initialize them
607 *
608 * @param priv
609 * @return error code
610 */
611int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
612{
613 int ret;
614 int txq_id, slots_num;
615 unsigned long flags;
616
617 /* Free all tx/cmd queues and keep-warm buffer */
618 iwl4965_hw_txq_ctx_free(priv);
619
620 ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
621 priv->hw_params.scd_bc_tbls_size);
622 if (ret) {
623 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
624 goto error_bc_tbls;
625 }
626 /* Alloc keep-warm buffer */
627 ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
628 if (ret) {
629 IWL_ERR(priv, "Keep Warm allocation failed\n");
630 goto error_kw;
631 }
632
633 /* allocate tx queue structure */
634 ret = iwl_legacy_alloc_txq_mem(priv);
635 if (ret)
636 goto error;
637
638 spin_lock_irqsave(&priv->lock, flags);
639
640 /* Turn off all Tx DMA fifos */
641 iwl4965_txq_set_sched(priv, 0);
642
643 /* Tell NIC where to find the "keep warm" buffer */
644 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
645
646 spin_unlock_irqrestore(&priv->lock, flags);
647
648 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
649 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
650 slots_num = (txq_id == priv->cmd_queue) ?
651 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
652 ret = iwl_legacy_tx_queue_init(priv,
653 &priv->txq[txq_id], slots_num,
654 txq_id);
655 if (ret) {
656 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
657 goto error;
658 }
659 }
660
661 return ret;
662
663 error:
664 iwl4965_hw_txq_ctx_free(priv);
665 iwl4965_free_dma_ptr(priv, &priv->kw);
666 error_kw:
667 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
668 error_bc_tbls:
669 return ret;
670}
671
672void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
673{
674 int txq_id, slots_num;
675 unsigned long flags;
676
677 spin_lock_irqsave(&priv->lock, flags);
678
679 /* Turn off all Tx DMA fifos */
680 iwl4965_txq_set_sched(priv, 0);
681
682 /* Tell NIC where to find the "keep warm" buffer */
683 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
684
685 spin_unlock_irqrestore(&priv->lock, flags);
686
687 /* Alloc and init all Tx queues, including the command queue (#4) */
688 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
689 slots_num = txq_id == priv->cmd_queue ?
690 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
691 iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
692 slots_num, txq_id);
693 }
694}
695
696/**
697 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
698 */
699void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
700{
701 int ch, txq_id;
702 unsigned long flags;
703
704 /* Turn off all Tx DMA fifos */
705 spin_lock_irqsave(&priv->lock, flags);
706
707 iwl4965_txq_set_sched(priv, 0);
708
709 /* Stop each Tx DMA channel, and wait for it to be idle */
710 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
711 iwl_legacy_write_direct32(priv,
712 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
713 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
714 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
715 1000))
716 IWL_ERR(priv, "Failing on timeout while stopping"
717 " DMA channel %d [0x%08x]", ch,
718 iwl_legacy_read_direct32(priv,
719 FH_TSSR_TX_STATUS_REG));
720 }
721 spin_unlock_irqrestore(&priv->lock, flags);
722
723 if (!priv->txq)
724 return;
725
726 /* Unmap DMA from host system and free skb's */
727 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
728 if (txq_id == priv->cmd_queue)
729 iwl_legacy_cmd_queue_unmap(priv);
730 else
731 iwl_legacy_tx_queue_unmap(priv, txq_id);
732}
733
734/*
735 * Find first available (lowest unused) Tx Queue, mark it "active".
736 * Called only when finding queue for aggregation.
737 * Should never return anything < 7, because queues 0-6 should already
738 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
739 */
740static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
741{
742 int txq_id;
743
744 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
745 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
746 return txq_id;
747 return -1;
748}
749
750/**
751 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
752 */
753static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
754 u16 txq_id)
755{
756 /* Simply stop the queue, but don't change any configuration;
757 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
758 iwl_legacy_write_prph(priv,
759 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
760 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
761 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
762}
763
764/**
765 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
766 */
767static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
768 u16 txq_id)
769{
770 u32 tbl_dw_addr;
771 u32 tbl_dw;
772 u16 scd_q2ratid;
773
774 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
775
776 tbl_dw_addr = priv->scd_base_addr +
777 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
778
779 tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
780
781 if (txq_id & 0x1)
782 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
783 else
784 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
785
786 iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
787
788 return 0;
789}
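/*
 * Editorial note (illustrative, not part of the original file): each 32-bit
 * word of the translate table appears to hold the RA/TID mapping for two
 * adjacent queues, e.g. queues 8 and 9 would share one dword -- the even
 * queue in the low 16 bits and the odd queue in the high 16 bits -- which
 * is what the (txq_id & 0x1) test above selects before writing back.
 */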
790
791/**
792 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
793 *
794 * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
795 * i.e. it must be one of the higher queues used for aggregation
796 */
797static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
798 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
799{
800 unsigned long flags;
801 u16 ra_tid;
802 int ret;
803
804 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
805 (IWL49_FIRST_AMPDU_QUEUE +
806 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
807 IWL_WARN(priv,
808 "queue number out of range: %d, must be %d to %d\n",
809 txq_id, IWL49_FIRST_AMPDU_QUEUE,
810 IWL49_FIRST_AMPDU_QUEUE +
811 priv->cfg->base_params->num_of_ampdu_queues - 1);
812 return -EINVAL;
813 }
814
815 ra_tid = BUILD_RAxTID(sta_id, tid);
816
817 /* Modify device's station table to Tx this TID */
818 ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
819 if (ret)
820 return ret;
821
822 spin_lock_irqsave(&priv->lock, flags);
823
824 /* Stop this Tx queue before configuring it */
825 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
826
827 /* Map receiver-address / traffic-ID to this queue */
828 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
829
830 /* Set this queue as a chain-building queue */
831 iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
832
833 /* Place first TFD at index corresponding to start sequence number.
834 * Assumes that ssn_idx is valid (!= 0xFFF) */
835 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
836 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
837 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
838
839 /* Set up Tx window size and frame limit for this queue */
840 iwl_legacy_write_targ_mem(priv,
841 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
842 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
843 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
844
845 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
846 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
847 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
848 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
849
850 iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
851
852 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
853 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
854
855 spin_unlock_irqrestore(&priv->lock, flags);
856
857 return 0;
858}
859
860
861int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
862 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
863{
864 int sta_id;
865 int tx_fifo;
866 int txq_id;
867 int ret;
868 unsigned long flags;
869 struct iwl_tid_data *tid_data;
870
871 tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
872 if (unlikely(tx_fifo < 0))
873 return tx_fifo;
874
875 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
876 __func__, sta->addr, tid);
877
878 sta_id = iwl_legacy_sta_id(sta);
879 if (sta_id == IWL_INVALID_STATION) {
880 IWL_ERR(priv, "Start AGG on invalid station\n");
881 return -ENXIO;
882 }
883 if (unlikely(tid >= MAX_TID_COUNT))
884 return -EINVAL;
885
886 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
887 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
888 return -ENXIO;
889 }
890
891 txq_id = iwl4965_txq_ctx_activate_free(priv);
892 if (txq_id == -1) {
893 IWL_ERR(priv, "No free aggregation queue available\n");
894 return -ENXIO;
895 }
896
897 spin_lock_irqsave(&priv->sta_lock, flags);
898 tid_data = &priv->stations[sta_id].tid[tid];
899 *ssn = SEQ_TO_SN(tid_data->seq_number);
900 tid_data->agg.txq_id = txq_id;
901 iwl_legacy_set_swq_id(&priv->txq[txq_id],
902 iwl4965_get_ac_from_tid(tid), txq_id);
903 spin_unlock_irqrestore(&priv->sta_lock, flags);
904
905 ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
906 sta_id, tid, *ssn);
907 if (ret)
908 return ret;
909
910 spin_lock_irqsave(&priv->sta_lock, flags);
911 tid_data = &priv->stations[sta_id].tid[tid];
912 if (tid_data->tfds_in_queue == 0) {
913 IWL_DEBUG_HT(priv, "HW queue is empty\n");
914 tid_data->agg.state = IWL_AGG_ON;
915 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
916 } else {
917 IWL_DEBUG_HT(priv,
918 "HW queue is NOT empty: %d packets in HW queue\n",
919 tid_data->tfds_in_queue);
920 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
921 }
922 spin_unlock_irqrestore(&priv->sta_lock, flags);
923 return ret;
924}
925
926/**
927 * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
928 * priv->lock must be held by the caller
929 */
930static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
931 u16 ssn_idx, u8 tx_fifo)
932{
933 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
934 (IWL49_FIRST_AMPDU_QUEUE +
935 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
936 IWL_WARN(priv,
937 "queue number out of range: %d, must be %d to %d\n",
938 txq_id, IWL49_FIRST_AMPDU_QUEUE,
939 IWL49_FIRST_AMPDU_QUEUE +
940 priv->cfg->base_params->num_of_ampdu_queues - 1);
941 return -EINVAL;
942 }
943
944 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
945
946 iwl_legacy_clear_bits_prph(priv,
947 IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
948
949 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
950 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
951	/* assumes that ssn_idx is valid (!= 0xFFF) */
952 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
953
954 iwl_legacy_clear_bits_prph(priv,
955 IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
956 iwl_txq_ctx_deactivate(priv, txq_id);
957 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
958
959 return 0;
960}
961
962int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
963 struct ieee80211_sta *sta, u16 tid)
964{
965 int tx_fifo_id, txq_id, sta_id, ssn;
966 struct iwl_tid_data *tid_data;
967 int write_ptr, read_ptr;
968 unsigned long flags;
969
970 tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
971 if (unlikely(tx_fifo_id < 0))
972 return tx_fifo_id;
973
974 sta_id = iwl_legacy_sta_id(sta);
975
976 if (sta_id == IWL_INVALID_STATION) {
977 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
978 return -ENXIO;
979 }
980
981 spin_lock_irqsave(&priv->sta_lock, flags);
982
983 tid_data = &priv->stations[sta_id].tid[tid];
984 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
985 txq_id = tid_data->agg.txq_id;
986
987 switch (priv->stations[sta_id].tid[tid].agg.state) {
988 case IWL_EMPTYING_HW_QUEUE_ADDBA:
989 /*
990 * This can happen if the peer stops aggregation
991 * again before we've had a chance to drain the
992 * queue we selected previously, i.e. before the
993 * session was really started completely.
994 */
995 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
996 goto turn_off;
997 case IWL_AGG_ON:
998 break;
999 default:
1000 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1001 }
1002
1003 write_ptr = priv->txq[txq_id].q.write_ptr;
1004 read_ptr = priv->txq[txq_id].q.read_ptr;
1005
1006 /* The queue is not empty */
1007 if (write_ptr != read_ptr) {
1008 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
1009 priv->stations[sta_id].tid[tid].agg.state =
1010 IWL_EMPTYING_HW_QUEUE_DELBA;
1011 spin_unlock_irqrestore(&priv->sta_lock, flags);
1012 return 0;
1013 }
1014
1015 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1016 turn_off:
1017 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1018
1019 /* do not restore/save irqs */
1020 spin_unlock(&priv->sta_lock);
1021 spin_lock(&priv->lock);
1022
1023 /*
1024	 * The only reason this call can fail is a queue number out of range,
1025	 * which can happen if the uCode is reloaded and all the station
1026	 * information is lost. If it is outside the range, there is no need
1027	 * to deactivate the uCode queue; just return "success" to allow
1028	 * mac80211 to clean up its own data.
1029 */
1030 iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
1031 spin_unlock_irqrestore(&priv->lock, flags);
1032
1033 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1034
1035 return 0;
1036}
1037
1038int iwl4965_txq_check_empty(struct iwl_priv *priv,
1039 int sta_id, u8 tid, int txq_id)
1040{
1041 struct iwl_queue *q = &priv->txq[txq_id].q;
1042 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1043 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1044 struct iwl_rxon_context *ctx;
1045
1046 ctx = &priv->contexts[priv->stations[sta_id].ctxid];
1047
1048 lockdep_assert_held(&priv->sta_lock);
1049
1050 switch (priv->stations[sta_id].tid[tid].agg.state) {
1051 case IWL_EMPTYING_HW_QUEUE_DELBA:
1052 /* We are reclaiming the last packet of the */
1053 /* aggregated HW queue */
1054 if ((txq_id == tid_data->agg.txq_id) &&
1055 (q->read_ptr == q->write_ptr)) {
1056 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1057 int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
1058 IWL_DEBUG_HT(priv,
1059 "HW queue empty: continue DELBA flow\n");
1060 iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
1061 tid_data->agg.state = IWL_AGG_OFF;
1062 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1063 }
1064 break;
1065 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1066 /* We are reclaiming the last packet of the queue */
1067 if (tid_data->tfds_in_queue == 0) {
1068 IWL_DEBUG_HT(priv,
1069 "HW queue empty: continue ADDBA flow\n");
1070 tid_data->agg.state = IWL_AGG_ON;
1071 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1072 }
1073 break;
1074 }
1075
1076 return 0;
1077}
1078
1079static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
1080 struct iwl_rxon_context *ctx,
1081 const u8 *addr1)
1082{
1083 struct ieee80211_sta *sta;
1084 struct iwl_station_priv *sta_priv;
1085
1086 rcu_read_lock();
1087 sta = ieee80211_find_sta(ctx->vif, addr1);
1088 if (sta) {
1089 sta_priv = (void *)sta->drv_priv;
1090 /* avoid atomic ops if this isn't a client */
1091 if (sta_priv->client &&
1092 atomic_dec_return(&sta_priv->pending_frames) == 0)
1093 ieee80211_sta_block_awake(priv->hw, sta, false);
1094 }
1095 rcu_read_unlock();
1096}
1097
1098static void
1099iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1100 bool is_agg)
1101{
1102 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1103
1104 if (!is_agg)
1105 iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1106
1107 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1108}
1109
1110int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1111{
1112 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1113 struct iwl_queue *q = &txq->q;
1114 struct iwl_tx_info *tx_info;
1115 int nfreed = 0;
1116 struct ieee80211_hdr *hdr;
1117
1118 if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
1119 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1120 "is out of range [0-%d] %d %d.\n", txq_id,
1121 index, q->n_bd, q->write_ptr, q->read_ptr);
1122 return 0;
1123 }
1124
1125 for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
1126 q->read_ptr != index;
1127 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1128
1129 tx_info = &txq->txb[txq->q.read_ptr];
1130 iwl4965_tx_status(priv, tx_info,
1131 txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
1132
1133 hdr = (struct ieee80211_hdr *)tx_info->skb->data;
1134 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
1135 nfreed++;
1136 tx_info->skb = NULL;
1137
1138 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1139 }
1140 return nfreed;
1141}
1142
1143/**
1144 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
1145 *
1146 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1147 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1148 */
1149static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1150 struct iwl_ht_agg *agg,
1151 struct iwl_compressed_ba_resp *ba_resp)
1152
1153{
1154 int i, sh, ack;
1155 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1156 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1157 int successes = 0;
1158 struct ieee80211_tx_info *info;
1159 u64 bitmap, sent_bitmap;
1160
1161 if (unlikely(!agg->wait_for_ba)) {
1162 if (unlikely(ba_resp->bitmap))
1163 IWL_ERR(priv, "Received BA when not expected\n");
1164 return -EINVAL;
1165 }
1166
1167 /* Mark that the expected block-ack response arrived */
1168 agg->wait_for_ba = 0;
1169 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
1170 ba_resp->seq_ctl);
1171
1172 /* Calculate shift to align block-ack bits with our Tx window bits */
1173 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
1174	if (sh < 0)	/* something is wrong with the indices */
1175 sh += 0x100;
1176
1177 if (agg->frame_count > (64 - sh)) {
1178 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1179 return -1;
1180 }
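	/*
	 * Worked example (editorial addition, not part of the original file):
	 * if the first frame of our Tx window sits at index 10 while the BA
	 * response's starting sequence maps to index 8, then sh = 2 and the
	 * 64-bit BA bitmap below is shifted right by 2 so that bit 0 lines up
	 * with the first frame the driver actually queued; a window that
	 * wrapped past index 255 makes sh negative, hence the += 0x100 fix-up
	 * above.
	 */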
1181
1182 /* don't use 64-bit values for now */
1183 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1184
1185 /* check for success or failure according to the
1186 * transmitted bitmap and block-ack bitmap */
1187 sent_bitmap = bitmap & agg->bitmap;
1188
1189 /* For each frame attempted in aggregation,
1190 * update driver's record of tx frame's status. */
1191 i = 0;
1192 while (sent_bitmap) {
1193 ack = sent_bitmap & 1ULL;
1194 successes += ack;
1195 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1196 ack ? "ACK" : "NACK", i,
1197 (agg->start_idx + i) & 0xff,
1198 agg->start_idx + i);
1199 sent_bitmap >>= 1;
1200 ++i;
1201 }
1202
1203 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
1204 (unsigned long long)bitmap);
1205
1206 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
1207 memset(&info->status, 0, sizeof(info->status));
1208 info->flags |= IEEE80211_TX_STAT_ACK;
1209 info->flags |= IEEE80211_TX_STAT_AMPDU;
1210 info->status.ampdu_ack_len = successes;
1211 info->status.ampdu_len = agg->frame_count;
1212 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1213
1214 return 0;
1215}
1216
1217/**
1218 * translate ucode response to mac80211 tx status control values
1219 */
1220void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1221 struct ieee80211_tx_info *info)
1222{
1223 struct ieee80211_tx_rate *r = &info->control.rates[0];
1224
1225 info->antenna_sel_tx =
1226 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1227 if (rate_n_flags & RATE_MCS_HT_MSK)
1228 r->flags |= IEEE80211_TX_RC_MCS;
1229 if (rate_n_flags & RATE_MCS_GF_MSK)
1230 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1231 if (rate_n_flags & RATE_MCS_HT40_MSK)
1232 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1233 if (rate_n_flags & RATE_MCS_DUP_MSK)
1234 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1235 if (rate_n_flags & RATE_MCS_SGI_MSK)
1236 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1237 r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1238}
1239
1240/**
1241 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1242 *
1243 * Handles block-acknowledge notification from device, which reports success
1244 * of frames sent via aggregation.
1245 */
1246void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
1247 struct iwl_rx_mem_buffer *rxb)
1248{
1249 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1250 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1251 struct iwl_tx_queue *txq = NULL;
1252 struct iwl_ht_agg *agg;
1253 int index;
1254 int sta_id;
1255 int tid;
1256 unsigned long flags;
1257
1258 /* "flow" corresponds to Tx queue */
1259 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1260
1261 /* "ssn" is start of block-ack Tx window, corresponds to index
1262 * (in Tx queue's circular buffer) of first TFD/frame in window */
1263 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1264
1265 if (scd_flow >= priv->hw_params.max_txq_num) {
1266 IWL_ERR(priv,
1267 "BUG_ON scd_flow is bigger than number of queues\n");
1268 return;
1269 }
1270
1271 txq = &priv->txq[scd_flow];
1272 sta_id = ba_resp->sta_id;
1273 tid = ba_resp->tid;
1274 agg = &priv->stations[sta_id].tid[tid].agg;
1275 if (unlikely(agg->txq_id != scd_flow)) {
1276 /*
1277		 * FIXME: this is a uCode bug which needs to be addressed;
1278		 * log the information and return for now.
1279		 * Since it can happen very often, and in order
1280		 * not to fill the syslog, don't enable the logging by default.
1281 */
1282 IWL_DEBUG_TX_REPLY(priv,
1283 "BA scd_flow %d does not match txq_id %d\n",
1284 scd_flow, agg->txq_id);
1285 return;
1286 }
1287
1288 /* Find index just before block-ack window */
1289 index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1290
1291 spin_lock_irqsave(&priv->sta_lock, flags);
1292
1293 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1294 "sta_id = %d\n",
1295 agg->wait_for_ba,
1296 (u8 *) &ba_resp->sta_addr_lo32,
1297 ba_resp->sta_id);
1298 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
1299 "scd_flow = "
1300 "%d, scd_ssn = %d\n",
1301 ba_resp->tid,
1302 ba_resp->seq_ctl,
1303 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1304 ba_resp->scd_flow,
1305 ba_resp->scd_ssn);
1306 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
1307 agg->start_idx,
1308 (unsigned long long)agg->bitmap);
1309
1310 /* Update driver's record of ACK vs. not for each frame in window */
1311 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1312
1313 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1314 * block-ack window (we assume that they've been successfully
1315 * transmitted ... if not, it's too late anyway). */
1316 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1317 /* calculate mac80211 ampdu sw queue to wake */
1318 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
1319 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
1320
1321 if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
1322 priv->mac80211_registered &&
1323 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1324 iwl_legacy_wake_queue(priv, txq);
1325
1326 iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
1327 }
1328
1329 spin_unlock_irqrestore(&priv->sta_lock, flags);
1330}
1331
1332#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1333const char *iwl4965_get_tx_fail_reason(u32 status)
1334{
1335#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1336#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1337
1338 switch (status & TX_STATUS_MSK) {
1339 case TX_STATUS_SUCCESS:
1340 return "SUCCESS";
1341 TX_STATUS_POSTPONE(DELAY);
1342 TX_STATUS_POSTPONE(FEW_BYTES);
1343 TX_STATUS_POSTPONE(QUIET_PERIOD);
1344 TX_STATUS_POSTPONE(CALC_TTAK);
1345 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1346 TX_STATUS_FAIL(SHORT_LIMIT);
1347 TX_STATUS_FAIL(LONG_LIMIT);
1348 TX_STATUS_FAIL(FIFO_UNDERRUN);
1349 TX_STATUS_FAIL(DRAIN_FLOW);
1350 TX_STATUS_FAIL(RFKILL_FLUSH);
1351 TX_STATUS_FAIL(LIFE_EXPIRE);
1352 TX_STATUS_FAIL(DEST_PS);
1353 TX_STATUS_FAIL(HOST_ABORTED);
1354 TX_STATUS_FAIL(BT_RETRY);
1355 TX_STATUS_FAIL(STA_INVALID);
1356 TX_STATUS_FAIL(FRAG_DROPPED);
1357 TX_STATUS_FAIL(TID_DISABLE);
1358 TX_STATUS_FAIL(FIFO_FLUSHED);
1359 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
1360 TX_STATUS_FAIL(PASSIVE_NO_RX);
1361 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
1362 }
1363
1364 return "UNKNOWN";
1365
1366#undef TX_STATUS_FAIL
1367#undef TX_STATUS_POSTPONE
1368}
1369#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
new file mode 100644
index 000000000000..001d148feb94
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
@@ -0,0 +1,166 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-4965-calib.h"
42
43#define IWL_AC_UNSET -1
44
45/**
46 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
47 * using sample data 100 bytes apart. If these sample points are good,
48 * it's a pretty good bet that everything between them is good, too.
49 */
50static int
51iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
52{
53 u32 val;
54 int ret = 0;
55 u32 errcnt = 0;
56 u32 i;
57
58 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
59
60 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
61 /* read data comes through single port, auto-incr addr */
62 /* NOTE: Use the debugless read so we don't flood kernel log
63 * if IWL_DL_IO is set */
64 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
65 i + IWL4965_RTC_INST_LOWER_BOUND);
66 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
67 if (val != le32_to_cpu(*image)) {
68 ret = -EIO;
69 errcnt++;
70 if (errcnt >= 3)
71 break;
72 }
73 }
74
75 return ret;
76}
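/*
 * Editorial note (illustrative arithmetic, not part of the original file):
 * the sparse check above samples one dword every 100 bytes -- "i += 100"
 * advances the SRAM address by 100 bytes and "image += 100/sizeof(u32)"
 * advances the host image pointer by 25 dwords, i.e. the same 100 bytes --
 * so, for example, a 96 KB image would be checked with roughly a thousand
 * reads instead of the ~24,000 a full dword-by-dword compare would need.
 */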
77
78/**
79 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
80 * looking at all data.
81 */
82static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
83 u32 len)
84{
85 u32 val;
86 u32 save_len = len;
87 int ret = 0;
88 u32 errcnt;
89
90 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
91
92 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
93 IWL4965_RTC_INST_LOWER_BOUND);
94
95 errcnt = 0;
96 for (; len > 0; len -= sizeof(u32), image++) {
97 /* read data comes through single port, auto-incr addr */
98 /* NOTE: Use the debugless read so we don't flood kernel log
99 * if IWL_DL_IO is set */
100 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
101 if (val != le32_to_cpu(*image)) {
102 IWL_ERR(priv, "uCode INST section is invalid at "
103 "offset 0x%x, is 0x%x, s/b 0x%x\n",
104 save_len - len, val, le32_to_cpu(*image));
105 ret = -EIO;
106 errcnt++;
107 if (errcnt >= 20)
108 break;
109 }
110 }
111
112 if (!errcnt)
113 IWL_DEBUG_INFO(priv,
114 "ucode image in INSTRUCTION memory is good\n");
115
116 return ret;
117}
118
119/**
120 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
121 * and verify its contents
122 */
123int iwl4965_verify_ucode(struct iwl_priv *priv)
124{
125 __le32 *image;
126 u32 len;
127 int ret;
128
129 /* Try bootstrap */
130 image = (__le32 *)priv->ucode_boot.v_addr;
131 len = priv->ucode_boot.len;
132 ret = iwl4965_verify_inst_sparse(priv, image, len);
133 if (!ret) {
134 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
135 return 0;
136 }
137
138 /* Try initialize */
139 image = (__le32 *)priv->ucode_init.v_addr;
140 len = priv->ucode_init.len;
141 ret = iwl4965_verify_inst_sparse(priv, image, len);
142 if (!ret) {
143 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
144 return 0;
145 }
146
147 /* Try runtime/protocol */
148 image = (__le32 *)priv->ucode_code.v_addr;
149 len = priv->ucode_code.len;
150 ret = iwl4965_verify_inst_sparse(priv, image, len);
151 if (!ret) {
152 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
153 return 0;
154 }
155
156 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
157
158 /* Since nothing seems to match, show first several data entries in
159 * instruction SRAM, so maybe visual inspection will give a clue.
160 * Selection of bootstrap image (vs. other images) is arbitrary. */
161 image = (__le32 *)priv->ucode_boot.v_addr;
162 len = priv->ucode_boot.len;
163 ret = iwl4965_verify_inst_full(priv, image, len);
164
165 return ret;
166}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
new file mode 100644
index 000000000000..f5433c74b845
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -0,0 +1,2188 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40
41#include "iwl-eeprom.h"
42#include "iwl-dev.h"
43#include "iwl-core.h"
44#include "iwl-io.h"
45#include "iwl-helpers.h"
46#include "iwl-4965-calib.h"
47#include "iwl-sta.h"
48#include "iwl-4965-led.h"
49#include "iwl-4965.h"
50#include "iwl-4965-debugfs.h"
51
52static int iwl4965_send_tx_power(struct iwl_priv *priv);
53static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
54
55/* Highest firmware API version supported */
56#define IWL4965_UCODE_API_MAX 2
57
58/* Lowest firmware API version supported */
59#define IWL4965_UCODE_API_MIN 2
60
61#define IWL4965_FW_PRE "iwlwifi-4965-"
62#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
63#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
64
65/* check contents of special bootstrap uCode SRAM */
66static int iwl4965_verify_bsm(struct iwl_priv *priv)
67{
68 __le32 *image = priv->ucode_boot.v_addr;
69 u32 len = priv->ucode_boot.len;
70 u32 reg;
71 u32 val;
72
73 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
74
75 /* verify BSM SRAM contents */
76 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
77 for (reg = BSM_SRAM_LOWER_BOUND;
78 reg < BSM_SRAM_LOWER_BOUND + len;
79 reg += sizeof(u32), image++) {
80 val = iwl_legacy_read_prph(priv, reg);
81 if (val != le32_to_cpu(*image)) {
82 IWL_ERR(priv, "BSM uCode verification failed at "
83 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
84 BSM_SRAM_LOWER_BOUND,
85 reg - BSM_SRAM_LOWER_BOUND, len,
86 val, le32_to_cpu(*image));
87 return -EIO;
88 }
89 }
90
91 IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
92
93 return 0;
94}
95
96/**
97 * iwl4965_load_bsm - Load bootstrap instructions
98 *
99 * BSM operation:
100 *
101 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
102 * in special SRAM that does not power down during RFKILL. When powering back
103 * up after power-saving sleeps (or during initial uCode load), the BSM loads
104 * the bootstrap program into the on-board processor, and starts it.
105 *
106 * The bootstrap program loads (via DMA) instructions and data for a new
107 * program from host DRAM locations indicated by the host driver in the
108 * BSM_DRAM_* registers. Once the new program is loaded, it starts
109 * automatically.
110 *
111 * When initializing the NIC, the host driver points the BSM to the
112 * "initialize" uCode image. This uCode sets up some internal data, then
113 * notifies host via "initialize alive" that it is complete.
114 *
115 * The host then replaces the BSM_DRAM_* pointer values to point to the
116 * normal runtime uCode instructions and a backup uCode data cache buffer
117 * (filled initially with starting data values for the on-board processor),
118 * then triggers the "initialize" uCode to load and launch the runtime uCode,
119 * which begins normal operation.
120 *
121 * When doing a power-save shutdown, runtime uCode saves data SRAM into
122 * the backup data cache in DRAM before SRAM is powered down.
123 *
124 * When powering back up, the BSM loads the bootstrap program. This reloads
125 * the runtime uCode instructions and the backup data cache into SRAM,
126 * and re-launches the runtime uCode from where it left off.
127 */
128static int iwl4965_load_bsm(struct iwl_priv *priv)
129{
130 __le32 *image = priv->ucode_boot.v_addr;
131 u32 len = priv->ucode_boot.len;
132 dma_addr_t pinst;
133 dma_addr_t pdata;
134 u32 inst_len;
135 u32 data_len;
136 int i;
137 u32 done;
138 u32 reg_offset;
139 int ret;
140
141 IWL_DEBUG_INFO(priv, "Begin load bsm\n");
142
143 priv->ucode_type = UCODE_RT;
144
145 /* make sure bootstrap program is no larger than BSM's SRAM size */
146 if (len > IWL49_MAX_BSM_SIZE)
147 return -EINVAL;
148
149 /* Tell bootstrap uCode where to find the "Initialize" uCode
150 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
151 * NOTE: iwl_init_alive_start() will replace these values,
152 * after the "initialize" uCode has run, to point to
153 * runtime/protocol instructions and backup data cache.
154 */
155 pinst = priv->ucode_init.p_addr >> 4;
156 pdata = priv->ucode_init_data.p_addr >> 4;
157 inst_len = priv->ucode_init.len;
158 data_len = priv->ucode_init_data.len;
159
160 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
161 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
162 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
163 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
164
165 /* Fill BSM memory with bootstrap instructions */
166 for (reg_offset = BSM_SRAM_LOWER_BOUND;
167 reg_offset < BSM_SRAM_LOWER_BOUND + len;
168 reg_offset += sizeof(u32), image++)
169 _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
170
171 ret = iwl4965_verify_bsm(priv);
172 if (ret)
173 return ret;
174
175 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
176 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
177 iwl_legacy_write_prph(priv,
178 BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
179 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
180
181 /* Load bootstrap code into instruction SRAM now,
182 * to prepare to load "initialize" uCode */
183 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
184
185 /* Wait for load of bootstrap uCode to finish */
186 for (i = 0; i < 100; i++) {
187 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
188 if (!(done & BSM_WR_CTRL_REG_BIT_START))
189 break;
190 udelay(10);
191 }
192 if (i < 100)
193 IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
194 else {
195 IWL_ERR(priv, "BSM write did not complete!\n");
196 return -EIO;
197 }
198
199 /* Enable future boot loads whenever power management unit triggers it
200 * (e.g. when powering back up after power-save shutdown) */
201 iwl_legacy_write_prph(priv,
202 BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
203
204
205 return 0;
206}
207
208/**
209 * iwl4965_set_ucode_ptrs - Set uCode address location
210 *
211 * Tell initialization uCode where to find runtime uCode.
212 *
213 * BSM registers initially contain pointers to initialization uCode.
214 * We need to replace them to load runtime uCode inst and data,
215 * and to save runtime data when powering down.
216 */
217static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
218{
219 dma_addr_t pinst;
220 dma_addr_t pdata;
221 int ret = 0;
222
223 /* bits 35:4 for 4965 */
224 pinst = priv->ucode_code.p_addr >> 4;
225 pdata = priv->ucode_data_backup.p_addr >> 4;
226
227 /* Tell bootstrap uCode where to find image to load */
228 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
229 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
230 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
231 priv->ucode_data.len);
232
233 /* Inst byte count must be last to set up, bit 31 signals uCode
234 * that all new ptr/size info is in place */
235 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
236 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
237 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
238
239 return ret;
240}
241
242/**
243 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
244 *
245 * Called after REPLY_ALIVE notification received from "initialize" uCode.
246 *
247 * The 4965 "initialize" ALIVE reply contains calibration data for:
248 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
249 * (3945 does not contain this data).
250 *
251 * Tell "initialize" uCode to go ahead and load the runtime uCode.
252*/
253static void iwl4965_init_alive_start(struct iwl_priv *priv)
254{
255 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
256 * This is a paranoid check, because we would not have gotten the
257 * "initialize" alive if code weren't properly loaded. */
258 if (iwl4965_verify_ucode(priv)) {
259 /* Runtime instruction load was bad;
260 * take it all the way back down so we can try again */
261 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
262 goto restart;
263 }
264
265 /* Calculate temperature */
266 priv->temperature = iwl4965_hw_get_temperature(priv);
267
268 /* Send pointers to protocol/runtime uCode image ... init code will
269 * load and launch runtime uCode, which will send us another "Alive"
270 * notification. */
271 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
272 if (iwl4965_set_ucode_ptrs(priv)) {
273 /* Runtime instruction load won't happen;
274 * take it all the way back down so we can try again */
275 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
276 goto restart;
277 }
278 return;
279
280restart:
281 queue_work(priv->workqueue, &priv->restart);
282}
283
284static bool iw4965_is_ht40_channel(__le32 rxon_flags)
285{
286 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
287 >> RXON_FLG_CHANNEL_MODE_POS;
288 return ((chan_mod == CHANNEL_MODE_PURE_40) ||
289 (chan_mod == CHANNEL_MODE_MIXED));
290}
291
292static void iwl4965_nic_config(struct iwl_priv *priv)
293{
294 unsigned long flags;
295 u16 radio_cfg;
296
297 spin_lock_irqsave(&priv->lock, flags);
298
299 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
300
301 /* write radio config values to register */
302 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
303 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
304 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
305 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
306 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
307
308 /* set CSR_HW_CONFIG_REG for uCode use */
309 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
310 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
311 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
312
313 priv->calib_info = (struct iwl_eeprom_calib_info *)
314 iwl_legacy_eeprom_query_addr(priv,
315 EEPROM_4965_CALIB_TXPOWER_OFFSET);
316
317 spin_unlock_irqrestore(&priv->lock, flags);
318}
319
320/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
321 * Called after every association, but this runs only once!
322 * ... once chain noise is calibrated the first time, it's good forever. */
323static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
324{
325 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
326
327 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
328 iwl_legacy_is_any_associated(priv)) {
329 struct iwl_calib_diff_gain_cmd cmd;
330
331 /* clear data for chain noise calibration algorithm */
332 data->chain_noise_a = 0;
333 data->chain_noise_b = 0;
334 data->chain_noise_c = 0;
335 data->chain_signal_a = 0;
336 data->chain_signal_b = 0;
337 data->chain_signal_c = 0;
338 data->beacon_count = 0;
339
340 memset(&cmd, 0, sizeof(cmd));
341 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
342 cmd.diff_gain_a = 0;
343 cmd.diff_gain_b = 0;
344 cmd.diff_gain_c = 0;
345 if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
346 sizeof(cmd), &cmd))
347 IWL_ERR(priv,
348 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
349 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
350 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
351 }
352}
353
354static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
355 .min_nrg_cck = 97,
356 .max_nrg_cck = 0, /* not used, set to 0 */
357
358 .auto_corr_min_ofdm = 85,
359 .auto_corr_min_ofdm_mrc = 170,
360 .auto_corr_min_ofdm_x1 = 105,
361 .auto_corr_min_ofdm_mrc_x1 = 220,
362
363 .auto_corr_max_ofdm = 120,
364 .auto_corr_max_ofdm_mrc = 210,
365 .auto_corr_max_ofdm_x1 = 140,
366 .auto_corr_max_ofdm_mrc_x1 = 270,
367
368 .auto_corr_min_cck = 125,
369 .auto_corr_max_cck = 200,
370 .auto_corr_min_cck_mrc = 200,
371 .auto_corr_max_cck_mrc = 400,
372
373 .nrg_th_cck = 100,
374 .nrg_th_ofdm = 100,
375
376 .barker_corr_th_min = 190,
377 .barker_corr_th_min_mrc = 390,
378 .nrg_th_cca = 62,
379};
380
381static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
382{
383 /* want Kelvin */
384 priv->hw_params.ct_kill_threshold =
385 CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
386}
387
388/**
389 * iwl4965_hw_set_hw_params
390 *
391 * Called when initializing driver
392 */
393static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
394{
395 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
396 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
397 priv->cfg->base_params->num_of_queues =
398 priv->cfg->mod_params->num_of_queues;
399
400 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
401 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
402 priv->hw_params.scd_bc_tbls_size =
403 priv->cfg->base_params->num_of_queues *
404 sizeof(struct iwl4965_scd_bc_tbl);
405 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
406 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
407 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
408 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
409 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
410 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
411 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
412
413 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
414
415 priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
416 priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
417 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
418 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
419
420 iwl4965_set_ct_threshold(priv);
421
422 priv->hw_params.sens = &iwl4965_sensitivity;
423 priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
424
425 return 0;
426}
427
428static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
429{
430 s32 sign = 1;
431
432 if (num < 0) {
433 sign = -sign;
434 num = -num;
435 }
436 if (denom < 0) {
437 sign = -sign;
438 denom = -denom;
439 }
440 *res = 1;
441 *res = ((num * 2 + denom) / (denom * 2)) * sign;
442
443 return 1;
444}
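/*
 * Illustrative sketch, not part of the original patch: the helper above
 * rounds a signed quotient to the nearest integer (halves away from zero)
 * by computing ((num * 2 + denom) / (denom * 2)) on absolute values.
 * A minimal standalone version of that arithmetic in plain C:
 */
static int example_div_round(int num, int denom)
{
	int sign = 1;

	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	/* e.g. 7/2 -> (14 + 2) / 4 = 4;  5/4 -> (10 + 4) / 8 = 1;  -7/2 -> -4 */
	return ((num * 2 + denom) / (denom * 2)) * sign;
}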
445
446/**
447 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
448 *
449 * Determines power supply voltage compensation for txpower calculations.
450 * Returns number of 1/2-dB steps to subtract from gain table index,
451 * to compensate for difference between power supply voltage during
452 * factory measurements, vs. current power supply voltage.
453 *
454 * Voltage indication is higher for lower voltage.
455 * Lower voltage requires more gain (lower gain table index).
456 */
457static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
458 s32 current_voltage)
459{
460 s32 comp = 0;
461
462 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
463 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
464 return 0;
465
466 iwl4965_math_div_round(current_voltage - eeprom_voltage,
467 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
468
469 if (current_voltage > eeprom_voltage)
470 comp *= 2;
471 if ((comp < -2) || (comp > 2))
472 comp = 0;
473
474 return comp;
475}
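/*
 * Illustrative sketch, not part of the original patch: a worked example of
 * the compensation above. The codes-per-0.3V divisor is assumed to be 7
 * (the real value lives in the 4965 hardware header); the doubling for
 * current_voltage > eeprom_voltage and the [-2, 2] clamp mirror the code
 * above. Example values are made up.
 */
static int example_voltage_comp(void)
{
	int eeprom_voltage = 280;	/* factory measurement (example) */
	int current_voltage = 287;	/* "initialize" ALIVE report (example) */
	int codes_per_03v = 7;		/* assumed TX_POWER_IWL_VOLTAGE_CODES_PER_03V */
	/* rounded division of the difference: (2*7 + 7) / (2*7) = 1 */
	int comp = (2 * (current_voltage - eeprom_voltage) + codes_per_03v) /
		   (2 * codes_per_03v);

	if (current_voltage > eeprom_voltage)
		comp *= 2;		/* higher indication -> lower supply -> 2 steps */
	if (comp < -2 || comp > 2)
		comp = 0;
	return comp;			/* 2 half-dB steps to subtract from the gain index */
}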
476
477static s32 iwl4965_get_tx_atten_grp(u16 channel)
478{
479 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
480 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
481 return CALIB_CH_GROUP_5;
482
483 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
484 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
485 return CALIB_CH_GROUP_1;
486
487 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
488 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
489 return CALIB_CH_GROUP_2;
490
491 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
492 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
493 return CALIB_CH_GROUP_3;
494
495 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
496 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
497 return CALIB_CH_GROUP_4;
498
499 return -1;
500}
501
502static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
503{
504 s32 b = -1;
505
506 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
507 if (priv->calib_info->band_info[b].ch_from == 0)
508 continue;
509
510 if ((channel >= priv->calib_info->band_info[b].ch_from)
511 && (channel <= priv->calib_info->band_info[b].ch_to))
512 break;
513 }
514
515 return b;
516}
517
518static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
519{
520 s32 val;
521
522 if (x2 == x1)
523 return y1;
524 else {
525 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
526 return val + y2;
527 }
528}
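/*
 * Illustrative sketch, not part of the original patch: the linear
 * interpolation above with example numbers. For channel x = 40 between
 * factory-calibrated channels x1 = 36 (y1 = 100) and x2 = 44 (y2 = 80):
 * (x2 - x) * (y1 - y2) / (x2 - x1) + y2 = 4 * 20 / 8 + 80 = 90.
 */
static int example_interpolate(void)
{
	int x = 40, x1 = 36, y1 = 100, x2 = 44, y2 = 80;

	return (x2 - x) * (y1 - y2) / (x2 - x1) + y2;	/* 90 */
}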
529
530/**
531 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
532 *
533 * Interpolates factory measurements from the two sample channels within a
534 * sub-band, to apply to channel of interest. Interpolation is proportional to
535 * differences in channel frequencies, which is proportional to differences
536 * in channel number.
537 */
538static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
539 struct iwl_eeprom_calib_ch_info *chan_info)
540{
541 s32 s = -1;
542 u32 c;
543 u32 m;
544 const struct iwl_eeprom_calib_measure *m1;
545 const struct iwl_eeprom_calib_measure *m2;
546 struct iwl_eeprom_calib_measure *omeas;
547 u32 ch_i1;
548 u32 ch_i2;
549
550 s = iwl4965_get_sub_band(priv, channel);
551 if (s >= EEPROM_TX_POWER_BANDS) {
552 IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
553 return -1;
554 }
555
556 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
557 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
558 chan_info->ch_num = (u8) channel;
559
560 IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
561 channel, s, ch_i1, ch_i2);
562
563 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
564 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
565 m1 = &(priv->calib_info->band_info[s].ch1.
566 measurements[c][m]);
567 m2 = &(priv->calib_info->band_info[s].ch2.
568 measurements[c][m]);
569 omeas = &(chan_info->measurements[c][m]);
570
571 omeas->actual_pow =
572 (u8) iwl4965_interpolate_value(channel, ch_i1,
573 m1->actual_pow,
574 ch_i2,
575 m2->actual_pow);
576 omeas->gain_idx =
577 (u8) iwl4965_interpolate_value(channel, ch_i1,
578 m1->gain_idx, ch_i2,
579 m2->gain_idx);
580 omeas->temperature =
581 (u8) iwl4965_interpolate_value(channel, ch_i1,
582 m1->temperature,
583 ch_i2,
584 m2->temperature);
585 omeas->pa_det =
586 (s8) iwl4965_interpolate_value(channel, ch_i1,
587 m1->pa_det, ch_i2,
588 m2->pa_det);
589
590 IWL_DEBUG_TXPOWER(priv,
591 "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
592 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
593 IWL_DEBUG_TXPOWER(priv,
594 "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
595 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
596 IWL_DEBUG_TXPOWER(priv,
597 "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
598 m1->pa_det, m2->pa_det, omeas->pa_det);
599 IWL_DEBUG_TXPOWER(priv,
600 "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
601 m1->temperature, m2->temperature,
602 omeas->temperature);
603 }
604 }
605
606 return 0;
607}
608
609/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
610 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
611static s32 back_off_table[] = {
612 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
613 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
614 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
615 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
616 10 /* CCK */
617};
618
619/* Thermal compensation values for txpower for various frequency ranges ...
620 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
621static struct iwl4965_txpower_comp_entry {
622 s32 degrees_per_05db_a;
623 s32 degrees_per_05db_a_denom;
624} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
625 {9, 2}, /* group 0 5.2, ch 34-43 */
626 {4, 1}, /* group 1 5.2, ch 44-70 */
627 {4, 1}, /* group 2 5.2, ch 71-124 */
628 {4, 1}, /* group 3 5.2, ch 125-200 */
629 {3, 1} /* group 4 2.4, ch all */
630};
631
632static s32 get_min_power_index(s32 rate_power_index, u32 band)
633{
634 if (!band) {
635 if ((rate_power_index & 7) <= 4)
636 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
637 }
638 return MIN_TX_GAIN_INDEX;
639}
640
641struct gain_entry {
642 u8 dsp;
643 u8 radio;
644};
645
646static const struct gain_entry gain_table[2][108] = {
647 /* 5.2GHz power gain index table */
648 {
649 {123, 0x3F}, /* highest txpower */
650 {117, 0x3F},
651 {110, 0x3F},
652 {104, 0x3F},
653 {98, 0x3F},
654 {110, 0x3E},
655 {104, 0x3E},
656 {98, 0x3E},
657 {110, 0x3D},
658 {104, 0x3D},
659 {98, 0x3D},
660 {110, 0x3C},
661 {104, 0x3C},
662 {98, 0x3C},
663 {110, 0x3B},
664 {104, 0x3B},
665 {98, 0x3B},
666 {110, 0x3A},
667 {104, 0x3A},
668 {98, 0x3A},
669 {110, 0x39},
670 {104, 0x39},
671 {98, 0x39},
672 {110, 0x38},
673 {104, 0x38},
674 {98, 0x38},
675 {110, 0x37},
676 {104, 0x37},
677 {98, 0x37},
678 {110, 0x36},
679 {104, 0x36},
680 {98, 0x36},
681 {110, 0x35},
682 {104, 0x35},
683 {98, 0x35},
684 {110, 0x34},
685 {104, 0x34},
686 {98, 0x34},
687 {110, 0x33},
688 {104, 0x33},
689 {98, 0x33},
690 {110, 0x32},
691 {104, 0x32},
692 {98, 0x32},
693 {110, 0x31},
694 {104, 0x31},
695 {98, 0x31},
696 {110, 0x30},
697 {104, 0x30},
698 {98, 0x30},
699 {110, 0x25},
700 {104, 0x25},
701 {98, 0x25},
702 {110, 0x24},
703 {104, 0x24},
704 {98, 0x24},
705 {110, 0x23},
706 {104, 0x23},
707 {98, 0x23},
708 {110, 0x22},
709 {104, 0x18},
710 {98, 0x18},
711 {110, 0x17},
712 {104, 0x17},
713 {98, 0x17},
714 {110, 0x16},
715 {104, 0x16},
716 {98, 0x16},
717 {110, 0x15},
718 {104, 0x15},
719 {98, 0x15},
720 {110, 0x14},
721 {104, 0x14},
722 {98, 0x14},
723 {110, 0x13},
724 {104, 0x13},
725 {98, 0x13},
726 {110, 0x12},
727 {104, 0x08},
728 {98, 0x08},
729 {110, 0x07},
730 {104, 0x07},
731 {98, 0x07},
732 {110, 0x06},
733 {104, 0x06},
734 {98, 0x06},
735 {110, 0x05},
736 {104, 0x05},
737 {98, 0x05},
738 {110, 0x04},
739 {104, 0x04},
740 {98, 0x04},
741 {110, 0x03},
742 {104, 0x03},
743 {98, 0x03},
744 {110, 0x02},
745 {104, 0x02},
746 {98, 0x02},
747 {110, 0x01},
748 {104, 0x01},
749 {98, 0x01},
750 {110, 0x00},
751 {104, 0x00},
752 {98, 0x00},
753 {93, 0x00},
754 {88, 0x00},
755 {83, 0x00},
756 {78, 0x00},
757 },
758 /* 2.4GHz power gain index table */
759 {
760 {110, 0x3f}, /* highest txpower */
761 {104, 0x3f},
762 {98, 0x3f},
763 {110, 0x3e},
764 {104, 0x3e},
765 {98, 0x3e},
766 {110, 0x3d},
767 {104, 0x3d},
768 {98, 0x3d},
769 {110, 0x3c},
770 {104, 0x3c},
771 {98, 0x3c},
772 {110, 0x3b},
773 {104, 0x3b},
774 {98, 0x3b},
775 {110, 0x3a},
776 {104, 0x3a},
777 {98, 0x3a},
778 {110, 0x39},
779 {104, 0x39},
780 {98, 0x39},
781 {110, 0x38},
782 {104, 0x38},
783 {98, 0x38},
784 {110, 0x37},
785 {104, 0x37},
786 {98, 0x37},
787 {110, 0x36},
788 {104, 0x36},
789 {98, 0x36},
790 {110, 0x35},
791 {104, 0x35},
792 {98, 0x35},
793 {110, 0x34},
794 {104, 0x34},
795 {98, 0x34},
796 {110, 0x33},
797 {104, 0x33},
798 {98, 0x33},
799 {110, 0x32},
800 {104, 0x32},
801 {98, 0x32},
802 {110, 0x31},
803 {104, 0x31},
804 {98, 0x31},
805 {110, 0x30},
806 {104, 0x30},
807 {98, 0x30},
808 {110, 0x6},
809 {104, 0x6},
810 {98, 0x6},
811 {110, 0x5},
812 {104, 0x5},
813 {98, 0x5},
814 {110, 0x4},
815 {104, 0x4},
816 {98, 0x4},
817 {110, 0x3},
818 {104, 0x3},
819 {98, 0x3},
820 {110, 0x2},
821 {104, 0x2},
822 {98, 0x2},
823 {110, 0x1},
824 {104, 0x1},
825 {98, 0x1},
826 {110, 0x0},
827 {104, 0x0},
828 {98, 0x0},
829 {97, 0},
830 {96, 0},
831 {95, 0},
832 {94, 0},
833 {93, 0},
834 {92, 0},
835 {91, 0},
836 {90, 0},
837 {89, 0},
838 {88, 0},
839 {87, 0},
840 {86, 0},
841 {85, 0},
842 {84, 0},
843 {83, 0},
844 {82, 0},
845 {81, 0},
846 {80, 0},
847 {79, 0},
848 {78, 0},
849 {77, 0},
850 {76, 0},
851 {75, 0},
852 {74, 0},
853 {73, 0},
854 {72, 0},
855 {71, 0},
856 {70, 0},
857 {69, 0},
858 {68, 0},
859 {67, 0},
860 {66, 0},
861 {65, 0},
862 {64, 0},
863 {63, 0},
864 {62, 0},
865 {61, 0},
866 {60, 0},
867 {59, 0},
868 }
869};
870
871static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
872 u8 is_ht40, u8 ctrl_chan_high,
873 struct iwl4965_tx_power_db *tx_power_tbl)
874{
875 u8 saturation_power;
876 s32 target_power;
877 s32 user_target_power;
878 s32 power_limit;
879 s32 current_temp;
880 s32 reg_limit;
881 s32 current_regulatory;
882 s32 txatten_grp = CALIB_CH_GROUP_MAX;
883 int i;
884 int c;
885 const struct iwl_channel_info *ch_info = NULL;
886 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
887 const struct iwl_eeprom_calib_measure *measurement;
888 s16 voltage;
889 s32 init_voltage;
890 s32 voltage_compensation;
891 s32 degrees_per_05db_num;
892 s32 degrees_per_05db_denom;
893 s32 factory_temp;
894 s32 temperature_comp[2];
895 s32 factory_gain_index[2];
896 s32 factory_actual_pwr[2];
897 s32 power_index;
898
899 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
900 * are used for indexing into txpower table) */
901 user_target_power = 2 * priv->tx_power_user_lmt;
902
903 /* Get current (RXON) channel, band, width */
904 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
905 is_ht40);
906
907 ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
908
909 if (!iwl_legacy_is_channel_valid(ch_info))
910 return -EINVAL;
911
912 /* get txatten group, used to select 1) thermal txpower adjustment
913 * and 2) mimo txpower balance between Tx chains. */
914 txatten_grp = iwl4965_get_tx_atten_grp(channel);
915 if (txatten_grp < 0) {
916 IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
917 channel);
918 return -EINVAL;
919 }
920
921 IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
922 channel, txatten_grp);
923
924 if (is_ht40) {
925 if (ctrl_chan_high)
926 channel -= 2;
927 else
928 channel += 2;
929 }
930
931 /* hardware txpower limits ...
932 * saturation (clipping distortion) txpowers are in half-dBm */
933 if (band)
934 saturation_power = priv->calib_info->saturation_power24;
935 else
936 saturation_power = priv->calib_info->saturation_power52;
937
938 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
939 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
940 if (band)
941 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
942 else
943 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
944 }
945
946 /* regulatory txpower limits ... reg_limit values are in half-dBm,
947 * max_power_avg values are in dBm, convert * 2 */
948 if (is_ht40)
949 reg_limit = ch_info->ht40_max_power_avg * 2;
950 else
951 reg_limit = ch_info->max_power_avg * 2;
952
953 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
954 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
955 if (band)
956 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
957 else
958 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
959 }
960
961 /* Interpolate txpower calibration values for this channel,
962 * based on factory calibration tests on spaced channels. */
963 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
964
965 /* calculate tx gain adjustment based on power supply voltage */
966 voltage = le16_to_cpu(priv->calib_info->voltage);
967 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
968 voltage_compensation =
969 iwl4965_get_voltage_compensation(voltage, init_voltage);
970
971 IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
972 init_voltage,
973 voltage, voltage_compensation);
974
975 /* get current temperature (Celsius) */
976 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
 977	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
978 current_temp = KELVIN_TO_CELSIUS(current_temp);
979
980 /* select thermal txpower adjustment params, based on channel group
981 * (same frequency group used for mimo txatten adjustment) */
982 degrees_per_05db_num =
983 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
984 degrees_per_05db_denom =
985 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
986
987 /* get per-chain txpower values from factory measurements */
988 for (c = 0; c < 2; c++) {
989 measurement = &ch_eeprom_info.measurements[c][1];
990
991 /* txgain adjustment (in half-dB steps) based on difference
992 * between factory and current temperature */
993 factory_temp = measurement->temperature;
994 iwl4965_math_div_round((current_temp - factory_temp) *
995 degrees_per_05db_denom,
996 degrees_per_05db_num,
997 &temperature_comp[c]);
998
999 factory_gain_index[c] = measurement->gain_idx;
1000 factory_actual_pwr[c] = measurement->actual_pow;
1001
1002 IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
1003 IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
1004 "curr tmp %d, comp %d steps\n",
1005 factory_temp, current_temp,
1006 temperature_comp[c]);
1007
1008 IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
1009 factory_gain_index[c],
1010 factory_actual_pwr[c]);
1011 }
1012
1013 /* for each of 33 bit-rates (including 1 for CCK) */
1014 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1015 u8 is_mimo_rate;
1016 union iwl4965_tx_power_dual_stream tx_power;
1017
1018 /* for mimo, reduce each chain's txpower by half
1019 * (3dB, 6 steps), so total output power is regulatory
1020 * compliant. */
1021 if (i & 0x8) {
1022 current_regulatory = reg_limit -
1023 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1024 is_mimo_rate = 1;
1025 } else {
1026 current_regulatory = reg_limit;
1027 is_mimo_rate = 0;
1028 }
1029
1030 /* find txpower limit, either hardware or regulatory */
1031 power_limit = saturation_power - back_off_table[i];
1032 if (power_limit > current_regulatory)
1033 power_limit = current_regulatory;
1034
1035 /* reduce user's txpower request if necessary
1036 * for this rate on this channel */
1037 target_power = user_target_power;
1038 if (target_power > power_limit)
1039 target_power = power_limit;
1040
1041 IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
1042 i, saturation_power - back_off_table[i],
1043 current_regulatory, user_target_power,
1044 target_power);
1045
1046 /* for each of 2 Tx chains (radio transmitters) */
1047 for (c = 0; c < 2; c++) {
1048 s32 atten_value;
1049
1050 if (is_mimo_rate)
1051 atten_value =
1052 (s32)le32_to_cpu(priv->card_alive_init.
1053 tx_atten[txatten_grp][c]);
1054 else
1055 atten_value = 0;
1056
1057 /* calculate index; higher index means lower txpower */
1058 power_index = (u8) (factory_gain_index[c] -
1059 (target_power -
1060 factory_actual_pwr[c]) -
1061 temperature_comp[c] -
1062 voltage_compensation +
1063 atten_value);
1064
1065/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
1066 power_index); */
1067
1068 if (power_index < get_min_power_index(i, band))
1069 power_index = get_min_power_index(i, band);
1070
1071 /* adjust 5 GHz index to support negative indexes */
1072 if (!band)
1073 power_index += 9;
1074
1075 /* CCK, rate 32, reduce txpower for CCK */
1076 if (i == POWER_TABLE_CCK_ENTRY)
1077 power_index +=
1078 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1079
1080 /* stay within the table! */
1081 if (power_index > 107) {
1082 IWL_WARN(priv, "txpower index %d > 107\n",
1083 power_index);
1084 power_index = 107;
1085 }
1086 if (power_index < 0) {
1087 IWL_WARN(priv, "txpower index %d < 0\n",
1088 power_index);
1089 power_index = 0;
1090 }
1091
1092 /* fill txpower command for this rate/chain */
1093 tx_power.s.radio_tx_gain[c] =
1094 gain_table[band][power_index].radio;
1095 tx_power.s.dsp_predis_atten[c] =
1096 gain_table[band][power_index].dsp;
1097
1098 IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
1099 "gain 0x%02x dsp %d\n",
1100 c, atten_value, power_index,
1101 tx_power.s.radio_tx_gain[c],
1102 tx_power.s.dsp_predis_atten[c]);
1103 } /* for each chain */
1104
1105 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1106
1107 } /* for each rate */
1108
1109 return 0;
1110}
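/*
 * Illustrative sketch, not part of the original patch: the core per-chain
 * index arithmetic used above, with made-up example numbers. Everything is
 * in half-dB steps and a higher index means lower txpower; the extra 5 GHz
 * offset and CCK compensation applied above are omitted here.
 */
static int example_power_index(void)
{
	int factory_gain_index = 70;	/* from factory calibration */
	int factory_actual_pwr = 28;	/* half-dBm measured at that index */
	int target_power = 24;		/* half-dBm requested for this rate */
	int temperature_comp = 2;	/* half-dB steps from temperature delta */
	int voltage_compensation = 1;	/* half-dB steps from supply voltage */
	int atten_value = 0;		/* MIMO chain balance, 0 for SISO */
	/* 70 - (24 - 28) - 2 - 1 + 0 = 71 */
	int power_index = factory_gain_index -
			  (target_power - factory_actual_pwr) -
			  temperature_comp - voltage_compensation + atten_value;

	/* stay within the 0..107 gain table, as above */
	if (power_index > 107)
		power_index = 107;
	if (power_index < 0)
		power_index = 0;
	return power_index;		/* 71 */
}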
1111
1112/**
1113 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
1114 *
1115 * Uses the active RXON for channel, band, and characteristics (ht40, high)
1116 * The power limit is taken from priv->tx_power_user_lmt.
1117 */
1118static int iwl4965_send_tx_power(struct iwl_priv *priv)
1119{
1120 struct iwl4965_txpowertable_cmd cmd = { 0 };
1121 int ret;
1122 u8 band = 0;
1123 bool is_ht40 = false;
1124 u8 ctrl_chan_high = 0;
1125 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1126
1127 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1128 "TX Power requested while scanning!\n"))
1129 return -EAGAIN;
1130
1131 band = priv->band == IEEE80211_BAND_2GHZ;
1132
1133 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1134
1135 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1136 ctrl_chan_high = 1;
1137
1138 cmd.band = band;
1139 cmd.channel = ctx->active.channel;
1140
1141 ret = iwl4965_fill_txpower_tbl(priv, band,
1142 le16_to_cpu(ctx->active.channel),
1143 is_ht40, ctrl_chan_high, &cmd.tx_power);
1144 if (ret)
1145 goto out;
1146
1147 ret = iwl_legacy_send_cmd_pdu(priv,
1148 REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1149
1150out:
1151 return ret;
1152}
1153
1154static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1155 struct iwl_rxon_context *ctx)
1156{
1157 int ret = 0;
1158 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1159 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1160 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1161
1162 if ((rxon1->flags == rxon2->flags) &&
1163 (rxon1->filter_flags == rxon2->filter_flags) &&
1164 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1165 (rxon1->ofdm_ht_single_stream_basic_rates ==
1166 rxon2->ofdm_ht_single_stream_basic_rates) &&
1167 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1168 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1169 (rxon1->rx_chain == rxon2->rx_chain) &&
1170 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1171 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1172 return 0;
1173 }
1174
1175 rxon_assoc.flags = ctx->staging.flags;
1176 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1177 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1178 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1179 rxon_assoc.reserved = 0;
1180 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1181 ctx->staging.ofdm_ht_single_stream_basic_rates;
1182 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1183 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1184 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1185
1186 ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1187 sizeof(rxon_assoc), &rxon_assoc, NULL);
1188 if (ret)
1189 return ret;
1190
1191 return ret;
1192}
1193
1194static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1195{
1196 /* cast away the const for active_rxon in this function */
1197 struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
1198 int ret;
1199 bool new_assoc =
1200 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1201
1202 if (!iwl_legacy_is_alive(priv))
1203 return -EBUSY;
1204
1205 if (!ctx->is_active)
1206 return 0;
1207
1208 /* always get timestamp with Rx frame */
1209 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1210
1211 ret = iwl_legacy_check_rxon_cmd(priv, ctx);
1212 if (ret) {
1213 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1214 return -EINVAL;
1215 }
1216
1217 /*
1218 * receive commit_rxon request
1219	 * abort any previous channel switch if one is still in progress
1220 */
1221 if (priv->switch_rxon.switch_in_progress &&
1222 (priv->switch_rxon.channel != ctx->staging.channel)) {
1223 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1224 le16_to_cpu(priv->switch_rxon.channel));
1225 iwl_legacy_chswitch_done(priv, false);
1226 }
1227
1228 /* If we don't need to send a full RXON, we can use
1229 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1230 * and other flags for the current radio configuration. */
1231 if (!iwl_legacy_full_rxon_required(priv, ctx)) {
1232 ret = iwl_legacy_send_rxon_assoc(priv, ctx);
1233 if (ret) {
1234 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1235 return ret;
1236 }
1237
1238 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1239 iwl_legacy_print_rx_config_cmd(priv, ctx);
1240 return 0;
1241 }
1242
1243	/* If we are currently associated and the new config requires
1244	 * an RXON_ASSOC and wants the associated mask enabled,
1245	 * we must clear the associated bit from the active configuration
1246	 * before we apply the new config */
1247 if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
1248 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1249 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1250
1251 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1252 sizeof(struct iwl_legacy_rxon_cmd),
1253 active_rxon);
1254
1255 /* If the mask clearing failed then we set
1256 * active_rxon back to what it was previously */
1257 if (ret) {
1258 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1259 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
1260 return ret;
1261 }
1262 iwl_legacy_clear_ucode_stations(priv, ctx);
1263 iwl_legacy_restore_stations(priv, ctx);
1264 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1265 if (ret) {
1266 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1267 return ret;
1268 }
1269 }
1270
1271 IWL_DEBUG_INFO(priv, "Sending RXON\n"
1272 "* with%s RXON_FILTER_ASSOC_MSK\n"
1273 "* channel = %d\n"
1274 "* bssid = %pM\n",
1275 (new_assoc ? "" : "out"),
1276 le16_to_cpu(ctx->staging.channel),
1277 ctx->staging.bssid_addr);
1278
1279 iwl_legacy_set_rxon_hwcrypto(priv, ctx,
1280 !priv->cfg->mod_params->sw_crypto);
1281
1282 /* Apply the new configuration
1283 * RXON unassoc clears the station table in uCode so restoration of
1284 * stations is needed after it (the RXON command) completes
1285 */
1286 if (!new_assoc) {
1287 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1288 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1289 if (ret) {
1290 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1291 return ret;
1292 }
1293 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
1294 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1295 iwl_legacy_clear_ucode_stations(priv, ctx);
1296 iwl_legacy_restore_stations(priv, ctx);
1297 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1298 if (ret) {
1299 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1300 return ret;
1301 }
1302 }
1303 if (new_assoc) {
1304 priv->start_calib = 0;
1305 /* Apply the new configuration
1306 * RXON assoc doesn't clear the station table in uCode,
1307 */
1308 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1309 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1310 if (ret) {
1311 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1312 return ret;
1313 }
1314 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1315 }
1316 iwl_legacy_print_rx_config_cmd(priv, ctx);
1317
1318 iwl4965_init_sensitivity(priv);
1319
1320	/* If we issue a new RXON command that requires a retune, then we must
1321	 * send a new TXPOWER command or we won't be able to Tx any frames */
1322 ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1323 if (ret) {
1324 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1325 return ret;
1326 }
1327
1328 return 0;
1329}
1330
1331static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1332 struct ieee80211_channel_switch *ch_switch)
1333{
1334 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1335 int rc;
1336 u8 band = 0;
1337 bool is_ht40 = false;
1338 u8 ctrl_chan_high = 0;
1339 struct iwl4965_channel_switch_cmd cmd;
1340 const struct iwl_channel_info *ch_info;
1341 u32 switch_time_in_usec, ucode_switch_time;
1342 u16 ch;
1343 u32 tsf_low;
1344 u8 switch_count;
1345 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
1346 struct ieee80211_vif *vif = ctx->vif;
1347 band = priv->band == IEEE80211_BAND_2GHZ;
1348
1349 is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
1350
1351 if (is_ht40 &&
1352 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1353 ctrl_chan_high = 1;
1354
1355 cmd.band = band;
1356 cmd.expect_beacon = 0;
1357 ch = ch_switch->channel->hw_value;
1358 cmd.channel = cpu_to_le16(ch);
1359 cmd.rxon_flags = ctx->staging.flags;
1360 cmd.rxon_filter_flags = ctx->staging.filter_flags;
1361 switch_count = ch_switch->count;
1362 tsf_low = ch_switch->timestamp & 0x0ffffffff;
1363 /*
1364 * calculate the ucode channel switch time
1365 * adding TSF as one of the factor for when to switch
1366 */
1367 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
1368 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
1369 beacon_interval)) {
1370 switch_count -= (priv->ucode_beacon_time -
1371 tsf_low) / beacon_interval;
1372 } else
1373 switch_count = 0;
1374 }
1375 if (switch_count <= 1)
1376 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1377 else {
1378 switch_time_in_usec =
1379 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1380 ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
1381 switch_time_in_usec,
1382 beacon_interval);
1383 cmd.switch_time = iwl_legacy_add_beacon_time(priv,
1384 priv->ucode_beacon_time,
1385 ucode_switch_time,
1386 beacon_interval);
1387 }
1388 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
1389 cmd.switch_time);
1390 ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
1391 if (ch_info)
1392 cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
1393 else {
1394 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1395 ctx->active.channel, ch);
1396 return -EFAULT;
1397 }
1398
1399 rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
1400 ctrl_chan_high, &cmd.tx_power);
1401 if (rc) {
1402 IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
1403 return rc;
1404 }
1405
1406 priv->switch_rxon.channel = cmd.channel;
1407 priv->switch_rxon.switch_in_progress = true;
1408
1409 return iwl_legacy_send_cmd_pdu(priv,
1410 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1411}
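/*
 * Illustrative sketch, not part of the original patch: the switch-time
 * arithmetic above for the multi-beacon case, assuming TIME_UNIT is the
 * 802.11 time unit of 1024 usec. With a 100 TU beacon interval and 5
 * beacons left before the switch, the offset added to the reference
 * beacon time is 100 * 5 * 1024 = 512000 usec.
 */
static unsigned int example_switch_time_usec(void)
{
	unsigned int beacon_int_tu = 100;	/* vif->bss_conf.beacon_int */
	unsigned int switch_count = 5;		/* beacons remaining */
	unsigned int time_unit_usec = 1024;	/* assumed TIME_UNIT */

	return beacon_int_tu * switch_count * time_unit_usec;	/* 512000 */
}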
1412
1413/**
1414 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1415 */
1416static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
1417 struct iwl_tx_queue *txq,
1418 u16 byte_cnt)
1419{
1420 struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
1421 int txq_id = txq->q.id;
1422 int write_ptr = txq->q.write_ptr;
1423 int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1424 __le16 bc_ent;
1425
1426 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1427
1428 bc_ent = cpu_to_le16(len & 0xFFF);
1429 /* Set up byte count within first 256 entries */
1430 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1431
1432 /* If within first 64 entries, duplicate at end */
1433 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1434 scd_bc_tbl[txq_id].
1435 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
1436}
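/*
 * Illustrative sketch, not part of the original patch: why the entry above
 * is written twice. Per the comments above, the per-queue byte-count array
 * has 256 primary slots plus 64 duplicate slots at the end, so a scheduler
 * read window that wraps past the end of the ring still sees valid counts.
 * A minimal model of that indexing (values assumed from the comments):
 */
static void example_bc_dup(unsigned short *tfd_offset, int write_ptr,
			   unsigned short bc_ent)
{
	tfd_offset[write_ptr] = bc_ent;		/* primary slot, 0..255 */
	if (write_ptr < 64)			/* mirror the first 64 entries */
		tfd_offset[256 + write_ptr] = bc_ent;
}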
1437
1438/**
1439 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1440 * @priv: driver data; the reading comes from the uCode statistics in priv
1441 *
1442 * A return of <0 indicates bogus data in the statistics
1443 */
1444static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1445{
1446 s32 temperature;
1447 s32 vt;
1448 s32 R1, R2, R3;
1449 u32 R4;
1450
1451 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1452 (priv->_4965.statistics.flag &
1453 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
1454 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
1455 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1456 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1457 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1458 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1459 } else {
1460 IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
1461 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1462 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1463 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1464 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
1465 }
1466
1467 /*
1468 * Temperature is only 23 bits, so sign extend out to 32.
1469 *
1470 * NOTE If we haven't received a statistics notification yet
1471 * with an updated temperature, use R4 provided to us in the
1472 * "initialize" ALIVE response.
1473 */
1474 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1475 vt = sign_extend32(R4, 23);
1476 else
1477 vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
1478 general.common.temperature), 23);
1479
1480 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1481
1482 if (R3 == R1) {
1483 IWL_ERR(priv, "Calibration conflict R1 == R3\n");
1484 return -1;
1485 }
1486
1487	/* Calculate temperature in kelvin, adjust by 97%.
1488	 * Add offset to center the adjustment around 0 degrees Celsius. */
1489 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1490 temperature /= (R3 - R1);
1491 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
1492
1493 IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
1494 temperature, KELVIN_TO_CELSIUS(temperature));
1495
1496 return temperature;
1497}
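/*
 * Illustrative sketch, not part of the original patch: the calibration
 * formula above with made-up readings. TEMPERATURE_CALIB_A_VAL and
 * TEMPERATURE_CALIB_KELVIN_OFFSET are assumed to be 259 and 8 here (their
 * definitions live in the 4965 hardware header; treat them as assumptions).
 */
static int example_temperature(void)
{
	int R1 = 100, R2 = 60, R3 = 140;	/* example "initialize" ALIVE values */
	int vt = 110;				/* example sensor reading */
	int calib_a = 259, kelvin_offset = 8;	/* assumed constants */
	/* 259 * (110 - 60) / (140 - 100) = 323 */
	int temperature = calib_a * (vt - R2) / (R3 - R1);

	/* 323 * 97 / 100 + 8 = 321 K, roughly 48 C */
	return temperature * 97 / 100 + kelvin_offset;
}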
1498
1499/* Adjust Txpower only if temperature variance is greater than threshold. */
1500#define IWL_TEMPERATURE_THRESHOLD 3
1501
1502/**
1503 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
1504 *
1505 * If the temperature has changed sufficiently since the last calibration,
1506 * then a recalibration is needed.
1507 *
1508 * Assumes the caller will replace priv->last_temperature once the
1509 * calibration has executed.
1510 */
1511static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
1512{
1513 int temp_diff;
1514
1515 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
1516 IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
1517 return 0;
1518 }
1519
1520 temp_diff = priv->temperature - priv->last_temperature;
1521
1522 /* get absolute value */
1523 if (temp_diff < 0) {
1524 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
1525 temp_diff = -temp_diff;
1526 } else if (temp_diff == 0)
1527 IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
1528 else
1529 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
1530
1531 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1532 IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
1533 return 0;
1534 }
1535
1536 IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
1537
1538 return 1;
1539}
1540
1541static void iwl4965_temperature_calib(struct iwl_priv *priv)
1542{
1543 s32 temp;
1544
1545 temp = iwl4965_hw_get_temperature(priv);
1546 if (temp < 0)
1547 return;
1548
1549 if (priv->temperature != temp) {
1550 if (priv->temperature)
1551 IWL_DEBUG_TEMP(priv, "Temperature changed "
1552 "from %dC to %dC\n",
1553 KELVIN_TO_CELSIUS(priv->temperature),
1554 KELVIN_TO_CELSIUS(temp));
1555 else
1556 IWL_DEBUG_TEMP(priv, "Temperature "
1557 "initialized to %dC\n",
1558 KELVIN_TO_CELSIUS(temp));
1559 }
1560
1561 priv->temperature = temp;
1562 set_bit(STATUS_TEMPERATURE, &priv->status);
1563
1564 if (!priv->disable_tx_power_cal &&
1565 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1566 iwl4965_is_temp_calib_needed(priv))
1567 queue_work(priv->workqueue, &priv->txpower_work);
1568}
1569
1570static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1571{
1572 switch (cmd_id) {
1573 case REPLY_RXON:
1574 return (u16) sizeof(struct iwl4965_rxon_cmd);
1575 default:
1576 return len;
1577 }
1578}
1579
1580static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
1581 u8 *data)
1582{
1583 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1584 addsta->mode = cmd->mode;
1585 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1586 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
1587 addsta->station_flags = cmd->station_flags;
1588 addsta->station_flags_msk = cmd->station_flags_msk;
1589 addsta->tid_disable_tx = cmd->tid_disable_tx;
1590 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1591 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1592 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1593 addsta->sleep_tx_count = cmd->sleep_tx_count;
1594 addsta->reserved1 = cpu_to_le16(0);
1595 addsta->reserved2 = cpu_to_le16(0);
1596
1597 return (u16)sizeof(struct iwl4965_addsta_cmd);
1598}
1599
1600static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
1601{
1602 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
1603}
1604
1605/**
1606 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
1607 */
1608static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1609 struct iwl_ht_agg *agg,
1610 struct iwl4965_tx_resp *tx_resp,
1611 int txq_id, u16 start_idx)
1612{
1613 u16 status;
1614 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
1615 struct ieee80211_tx_info *info = NULL;
1616 struct ieee80211_hdr *hdr = NULL;
1617 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1618 int i, sh, idx;
1619 u16 seq;
1620 if (agg->wait_for_ba)
1621 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
1622
1623 agg->frame_count = tx_resp->frame_count;
1624 agg->start_idx = start_idx;
1625 agg->rate_n_flags = rate_n_flags;
1626 agg->bitmap = 0;
1627
1628 /* num frames attempted by Tx command */
1629 if (agg->frame_count == 1) {
1630 /* Only one frame was attempted; no block-ack will arrive */
1631 status = le16_to_cpu(frame_status[0].status);
1632 idx = start_idx;
1633
1634 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
1635 agg->frame_count, agg->start_idx, idx);
1636
1637 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
1638 info->status.rates[0].count = tx_resp->failure_frame + 1;
1639 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1640 info->flags |= iwl4965_tx_status_to_mac80211(status);
1641 iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
1642
1643 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
1644 status & 0xff, tx_resp->failure_frame);
1645 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
1646
1647 agg->wait_for_ba = 0;
1648 } else {
1649 /* Two or more frames were attempted; expect block-ack */
1650 u64 bitmap = 0;
1651 int start = agg->start_idx;
1652
1653 /* Construct bit-map of pending frames within Tx window */
1654 for (i = 0; i < agg->frame_count; i++) {
1655 u16 sc;
1656 status = le16_to_cpu(frame_status[i].status);
1657 seq = le16_to_cpu(frame_status[i].sequence);
1658 idx = SEQ_TO_INDEX(seq);
1659 txq_id = SEQ_TO_QUEUE(seq);
1660
1661 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1662 AGG_TX_STATE_ABORT_MSK))
1663 continue;
1664
1665 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
1666 agg->frame_count, txq_id, idx);
1667
1668 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
1669 if (!hdr) {
1670 IWL_ERR(priv,
1671 "BUG_ON idx doesn't point to valid skb"
1672 " idx=%d, txq_id=%d\n", idx, txq_id);
1673 return -1;
1674 }
1675
1676 sc = le16_to_cpu(hdr->seq_ctrl);
1677 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1678 IWL_ERR(priv,
1679 "BUG_ON idx doesn't match seq control"
1680 " idx=%d, seq_idx=%d, seq=%d\n",
1681 idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
1682 return -1;
1683 }
1684
1685 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
1686 i, idx, SEQ_TO_SN(sc));
1687
1688 sh = idx - start;
1689 if (sh > 64) {
1690 sh = (start - idx) + 0xff;
1691 bitmap = bitmap << sh;
1692 sh = 0;
1693 start = idx;
1694 } else if (sh < -64)
1695 sh = 0xff - (start - idx);
1696 else if (sh < 0) {
1697 sh = start - idx;
1698 start = idx;
1699 bitmap = bitmap << sh;
1700 sh = 0;
1701 }
1702 bitmap |= 1ULL << sh;
1703 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
1704 start, (unsigned long long)bitmap);
1705 }
1706
1707 agg->bitmap = bitmap;
1708 agg->start_idx = start;
1709 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
1710 agg->frame_count, agg->start_idx,
1711 (unsigned long long)agg->bitmap);
1712
1713 if (bitmap)
1714 agg->wait_for_ba = 1;
1715 }
1716 return 0;
1717}
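/*
 * Illustrative sketch, not part of the original patch: how the pending-frame
 * bitmap above is built in the common, non-wrapping case (the driver also
 * re-bases start_idx when sequence numbers wrap). With start_idx = 5 and
 * attempted frames at queue indexes 5 and 7, the shifts are 0 and 2, so the
 * bitmap relative to start_idx becomes 0x5.
 */
static unsigned long long example_agg_bitmap(void)
{
	unsigned long long bitmap = 0;
	int idxs[] = { 5, 7 };		/* indexes reported in agg_status */
	int start = 5;			/* agg->start_idx */
	int i;

	for (i = 0; i < 2; i++)
		bitmap |= 1ULL << (idxs[i] - start);

	return bitmap;			/* bits 0 and 2 set */
}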
1718
1719static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
1720{
1721 int i;
1722 int start = 0;
1723 int ret = IWL_INVALID_STATION;
1724 unsigned long flags;
1725
1726 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
1727 start = IWL_STA_ID;
1728
1729 if (is_broadcast_ether_addr(addr))
1730 return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
1731
1732 spin_lock_irqsave(&priv->sta_lock, flags);
1733 for (i = start; i < priv->hw_params.max_stations; i++)
1734 if (priv->stations[i].used &&
1735 (!compare_ether_addr(priv->stations[i].sta.sta.addr,
1736 addr))) {
1737 ret = i;
1738 goto out;
1739 }
1740
1741 IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
1742 addr, priv->num_stations);
1743
1744 out:
1745 /*
1746	 * It is possible that more commands interacting with stations
1747	 * arrive before we have completed processing the addition of a
1748	 * station
1749 */
1750 if (ret != IWL_INVALID_STATION &&
1751 (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
1752 ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
1753 (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
1754 IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
1755 ret);
1756 ret = IWL_INVALID_STATION;
1757 }
1758 spin_unlock_irqrestore(&priv->sta_lock, flags);
1759 return ret;
1760}
1761
1762static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1763{
1764 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
1765 return IWL_AP_ID;
1766 } else {
1767 u8 *da = ieee80211_get_DA(hdr);
1768 return iwl4965_find_station(priv, da);
1769 }
1770}
1771
1772/**
1773 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
1774 */
1775static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
1776 struct iwl_rx_mem_buffer *rxb)
1777{
1778 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1779 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1780 int txq_id = SEQ_TO_QUEUE(sequence);
1781 int index = SEQ_TO_INDEX(sequence);
1782 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1783 struct ieee80211_hdr *hdr;
1784 struct ieee80211_tx_info *info;
1785 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1786 u32 status = le32_to_cpu(tx_resp->u.status);
1787 int uninitialized_var(tid);
1788 int sta_id;
1789 int freed;
1790 u8 *qc = NULL;
1791 unsigned long flags;
1792
1793 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
1794 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
1795 "is out of range [0-%d] %d %d\n", txq_id,
1796 index, txq->q.n_bd, txq->q.write_ptr,
1797 txq->q.read_ptr);
1798 return;
1799 }
1800
1801 txq->time_stamp = jiffies;
1802 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
1803 memset(&info->status, 0, sizeof(info->status));
1804
1805 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
1806 if (ieee80211_is_data_qos(hdr->frame_control)) {
1807 qc = ieee80211_get_qos_ctl(hdr);
1808 tid = qc[0] & 0xf;
1809 }
1810
1811 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
1812 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
1813 IWL_ERR(priv, "Station not known\n");
1814 return;
1815 }
1816
1817 spin_lock_irqsave(&priv->sta_lock, flags);
1818 if (txq->sched_retry) {
1819 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
1820 struct iwl_ht_agg *agg = NULL;
1821 WARN_ON(!qc);
1822
1823 agg = &priv->stations[sta_id].tid[tid].agg;
1824
1825 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
1826
1827 /* check if BAR is needed */
1828 if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
1829 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1830
1831 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1832 index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
1833 txq->q.n_bd);
1834 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
1835 "%d index %d\n", scd_ssn , index);
1836 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1837 if (qc)
1838 iwl4965_free_tfds_in_queue(priv, sta_id,
1839 tid, freed);
1840
1841 if (priv->mac80211_registered &&
1842 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
1843 && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1844 iwl_legacy_wake_queue(priv, txq);
1845 }
1846 } else {
1847 info->status.rates[0].count = tx_resp->failure_frame + 1;
1848 info->flags |= iwl4965_tx_status_to_mac80211(status);
1849 iwl4965_hwrate_to_tx_control(priv,
1850 le32_to_cpu(tx_resp->rate_n_flags),
1851 info);
1852
1853 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
1854 "rate_n_flags 0x%x retries %d\n",
1855 txq_id,
1856 iwl4965_get_tx_fail_reason(status), status,
1857 le32_to_cpu(tx_resp->rate_n_flags),
1858 tx_resp->failure_frame);
1859
1860 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1861 if (qc && likely(sta_id != IWL_INVALID_STATION))
1862 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
1863 else if (sta_id == IWL_INVALID_STATION)
1864 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
1865
1866 if (priv->mac80211_registered &&
1867 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
1868 iwl_legacy_wake_queue(priv, txq);
1869 }
1870 if (qc && likely(sta_id != IWL_INVALID_STATION))
1871 iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
1872
1873 iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
1874
1875 spin_unlock_irqrestore(&priv->sta_lock, flags);
1876}
1877
1878static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
1879 struct iwl_rx_mem_buffer *rxb)
1880{
1881 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1882 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
1883 u8 rate __maybe_unused =
1884 iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
1885
1886 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
1887 "tsf:0x%.8x%.8x rate:%d\n",
1888 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
1889 beacon->beacon_notify_hdr.failure_frame,
1890 le32_to_cpu(beacon->ibss_mgr_status),
1891 le32_to_cpu(beacon->high_tsf),
1892 le32_to_cpu(beacon->low_tsf), rate);
1893
1894 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
1895}
1896
1897/* Set up 4965-specific Rx frame reply handlers */
1898static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
1899{
1900 /* Legacy Rx frames */
1901 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
1902 /* Tx response */
1903 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
1904 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
1905}
1906
1907static struct iwl_hcmd_ops iwl4965_hcmd = {
1908 .rxon_assoc = iwl4965_send_rxon_assoc,
1909 .commit_rxon = iwl4965_commit_rxon,
1910 .set_rxon_chain = iwl4965_set_rxon_chain,
1911};
1912
1913static void iwl4965_post_scan(struct iwl_priv *priv)
1914{
1915 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1916
1917 /*
1918 * Since setting the RXON may have been deferred while
1919 * performing the scan, fire one off if needed
1920 */
1921 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1922 iwl_legacy_commit_rxon(priv, ctx);
1923}
1924
1925static void iwl4965_post_associate(struct iwl_priv *priv)
1926{
1927 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1928 struct ieee80211_vif *vif = ctx->vif;
1929 struct ieee80211_conf *conf = NULL;
1930 int ret = 0;
1931
1932 if (!vif || !priv->is_open)
1933 return;
1934
1935 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1936 return;
1937
1938 iwl_legacy_scan_cancel_timeout(priv, 200);
1939
1940 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
1941
1942 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1943 iwl_legacy_commit_rxon(priv, ctx);
1944
1945 ret = iwl_legacy_send_rxon_timing(priv, ctx);
1946 if (ret)
1947		IWL_WARN(priv, "RXON timing failed - "
1948				"Attempting to continue.\n");
1949
1950 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
1951
1952 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
1953
1954 if (priv->cfg->ops->hcmd->set_rxon_chain)
1955 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1956
1957 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
1958
1959 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
1960 vif->bss_conf.aid, vif->bss_conf.beacon_int);
1961
1962 if (vif->bss_conf.use_short_preamble)
1963 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1964 else
1965 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1966
1967 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
1968 if (vif->bss_conf.use_short_slot)
1969 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
1970 else
1971 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1972 }
1973
1974 iwl_legacy_commit_rxon(priv, ctx);
1975
1976 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
1977 vif->bss_conf.aid, ctx->active.bssid_addr);
1978
1979 switch (vif->type) {
1980 case NL80211_IFTYPE_STATION:
1981 break;
1982 case NL80211_IFTYPE_ADHOC:
1983 iwl4965_send_beacon_cmd(priv);
1984 break;
1985 default:
1986 IWL_ERR(priv, "%s Should not be called in %d mode\n",
1987 __func__, vif->type);
1988 break;
1989 }
1990
1991	/* The chain noise calibration will enable PM upon completion.
1992	 * If chain noise has already been run, then we need to enable
1993	 * power management here. */
1994 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
1995 iwl_legacy_power_update_mode(priv, false);
1996
1997 /* Enable Rx differential gain and sensitivity calibrations */
1998 iwl4965_chain_noise_reset(priv);
1999 priv->start_calib = 1;
2000}
2001
2002static void iwl4965_config_ap(struct iwl_priv *priv)
2003{
2004 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2005 struct ieee80211_vif *vif = ctx->vif;
2006 int ret = 0;
2007
2008 lockdep_assert_held(&priv->mutex);
2009
2010 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2011 return;
2012
2013 /* The following should be done only at AP bring up */
2014 if (!iwl_legacy_is_associated_ctx(ctx)) {
2015
2016 /* RXON - unassoc (to set timing command) */
2017 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2018 iwl_legacy_commit_rxon(priv, ctx);
2019
2020 /* RXON Timing */
2021 ret = iwl_legacy_send_rxon_timing(priv, ctx);
2022 if (ret)
2023 IWL_WARN(priv, "RXON timing failed - "
2024 "Attempting to continue.\n");
2025
2026 /* AP has all antennas */
2027 priv->chain_noise_data.active_chains =
2028 priv->hw_params.valid_rx_ant;
2029 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
2030 if (priv->cfg->ops->hcmd->set_rxon_chain)
2031 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2032
2033 ctx->staging.assoc_id = 0;
2034
2035 if (vif->bss_conf.use_short_preamble)
2036 ctx->staging.flags |=
2037 RXON_FLG_SHORT_PREAMBLE_MSK;
2038 else
2039 ctx->staging.flags &=
2040 ~RXON_FLG_SHORT_PREAMBLE_MSK;
2041
2042 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2043 if (vif->bss_conf.use_short_slot)
2044 ctx->staging.flags |=
2045 RXON_FLG_SHORT_SLOT_MSK;
2046 else
2047 ctx->staging.flags &=
2048 ~RXON_FLG_SHORT_SLOT_MSK;
2049 }
2050 /* need to send beacon cmd before committing assoc RXON! */
2051 iwl4965_send_beacon_cmd(priv);
2052 /* restore RXON assoc */
2053 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2054 iwl_legacy_commit_rxon(priv, ctx);
2055 }
2056 iwl4965_send_beacon_cmd(priv);
2057}
2058
2059static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2060 .get_hcmd_size = iwl4965_get_hcmd_size,
2061 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2062 .request_scan = iwl4965_request_scan,
2063 .post_scan = iwl4965_post_scan,
2064};
2065
2066static struct iwl_lib_ops iwl4965_lib = {
2067 .set_hw_params = iwl4965_hw_set_hw_params,
2068 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
2069 .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
2070 .txq_free_tfd = iwl4965_hw_txq_free_tfd,
2071 .txq_init = iwl4965_hw_tx_queue_init,
2072 .rx_handler_setup = iwl4965_rx_handler_setup,
2073 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
2074 .init_alive_start = iwl4965_init_alive_start,
2075 .load_ucode = iwl4965_load_bsm,
2076 .dump_nic_event_log = iwl4965_dump_nic_event_log,
2077 .dump_nic_error_log = iwl4965_dump_nic_error_log,
2078 .dump_fh = iwl4965_dump_fh,
2079 .set_channel_switch = iwl4965_hw_channel_switch,
2080 .apm_ops = {
2081 .init = iwl_legacy_apm_init,
2082 .config = iwl4965_nic_config,
2083 },
2084 .eeprom_ops = {
2085 .regulatory_bands = {
2086 EEPROM_REGULATORY_BAND_1_CHANNELS,
2087 EEPROM_REGULATORY_BAND_2_CHANNELS,
2088 EEPROM_REGULATORY_BAND_3_CHANNELS,
2089 EEPROM_REGULATORY_BAND_4_CHANNELS,
2090 EEPROM_REGULATORY_BAND_5_CHANNELS,
2091 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
2092 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
2093 },
2094 .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
2095 .release_semaphore = iwl4965_eeprom_release_semaphore,
2096 },
2097 .send_tx_power = iwl4965_send_tx_power,
2098 .update_chain_flags = iwl4965_update_chain_flags,
2099 .temp_ops = {
2100 .temperature = iwl4965_temperature_calib,
2101 },
2102 .debugfs_ops = {
2103 .rx_stats_read = iwl4965_ucode_rx_stats_read,
2104 .tx_stats_read = iwl4965_ucode_tx_stats_read,
2105 .general_stats_read = iwl4965_ucode_general_stats_read,
2106 },
2107 .check_plcp_health = iwl4965_good_plcp_health,
2108};
2109
2110static const struct iwl_legacy_ops iwl4965_legacy_ops = {
2111 .post_associate = iwl4965_post_associate,
2112 .config_ap = iwl4965_config_ap,
2113 .manage_ibss_station = iwl4965_manage_ibss_station,
2114 .update_bcast_stations = iwl4965_update_bcast_stations,
2115};
2116
2117struct ieee80211_ops iwl4965_hw_ops = {
2118 .tx = iwl4965_mac_tx,
2119 .start = iwl4965_mac_start,
2120 .stop = iwl4965_mac_stop,
2121 .add_interface = iwl_legacy_mac_add_interface,
2122 .remove_interface = iwl_legacy_mac_remove_interface,
2123 .change_interface = iwl_legacy_mac_change_interface,
2124 .config = iwl_legacy_mac_config,
2125 .configure_filter = iwl4965_configure_filter,
2126 .set_key = iwl4965_mac_set_key,
2127 .update_tkip_key = iwl4965_mac_update_tkip_key,
2128 .conf_tx = iwl_legacy_mac_conf_tx,
2129 .reset_tsf = iwl_legacy_mac_reset_tsf,
2130 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
2131 .ampdu_action = iwl4965_mac_ampdu_action,
2132 .hw_scan = iwl_legacy_mac_hw_scan,
2133 .sta_add = iwl4965_mac_sta_add,
2134 .sta_remove = iwl_legacy_mac_sta_remove,
2135 .channel_switch = iwl4965_mac_channel_switch,
2136 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
2137};
2138
2139static const struct iwl_ops iwl4965_ops = {
2140 .lib = &iwl4965_lib,
2141 .hcmd = &iwl4965_hcmd,
2142 .utils = &iwl4965_hcmd_utils,
2143 .led = &iwl4965_led_ops,
2144 .legacy = &iwl4965_legacy_ops,
2145 .ieee80211_ops = &iwl4965_hw_ops,
2146};
2147
2148static struct iwl_base_params iwl4965_base_params = {
2149 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
2150 .num_of_queues = IWL49_NUM_QUEUES,
2151 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
2152 .pll_cfg_val = 0,
2153 .set_l0s = true,
2154 .use_bsm = true,
2155 .led_compensation = 61,
2156 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2157 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2158 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2159 .temperature_kelvin = true,
2160 .max_event_log_size = 512,
2161 .ucode_tracing = true,
2162 .sensitivity_calib_by_driver = true,
2163 .chain_noise_calib_by_driver = true,
2164};
2165
2166struct iwl_cfg iwl4965_cfg = {
2167 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2168 .fw_name_pre = IWL4965_FW_PRE,
2169 .ucode_api_max = IWL4965_UCODE_API_MAX,
2170 .ucode_api_min = IWL4965_UCODE_API_MIN,
2171 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2172 .valid_tx_ant = ANT_AB,
2173 .valid_rx_ant = ANT_ABC,
2174 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2175 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2176 .ops = &iwl4965_ops,
2177 .mod_params = &iwl4965_mod_params,
2178 .base_params = &iwl4965_base_params,
2179 .led_mode = IWL_LED_BLINK,
2180 /*
2181 * Force use of chains B and C for scan RX on 5 GHz band
2182 * because the device has off-channel reception on chain A.
2183 */
2184 .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
2185};
2186
2187/* Module firmware */
2188MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
new file mode 100644
index 000000000000..01f8163daf16
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.h
@@ -0,0 +1,282 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_4965_h__
64#define __iwl_4965_h__
65
66#include "iwl-dev.h"
67
68/* configuration for the _4965 devices */
69extern struct iwl_cfg iwl4965_cfg;
70
71extern struct iwl_mod_params iwl4965_mod_params;
72
73extern struct ieee80211_ops iwl4965_hw_ops;
74
75/* tx queue */
76void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
77 int sta_id, int tid, int freed);
78
79/* RXON */
80void iwl4965_set_rxon_chain(struct iwl_priv *priv,
81 struct iwl_rxon_context *ctx);
82
83/* uCode */
84int iwl4965_verify_ucode(struct iwl_priv *priv);
85
86/* lib */
87void iwl4965_check_abort_status(struct iwl_priv *priv,
88 u8 frame_count, u32 status);
89
90void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
91int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
92int iwl4965_hw_nic_init(struct iwl_priv *priv);
93int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
94
95/* rx */
96void iwl4965_rx_queue_restock(struct iwl_priv *priv);
97void iwl4965_rx_replenish(struct iwl_priv *priv);
98void iwl4965_rx_replenish_now(struct iwl_priv *priv);
99void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
100int iwl4965_rxq_stop(struct iwl_priv *priv);
101int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
102void iwl4965_rx_reply_rx(struct iwl_priv *priv,
103 struct iwl_rx_mem_buffer *rxb);
104void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
105 struct iwl_rx_mem_buffer *rxb);
106void iwl4965_rx_handle(struct iwl_priv *priv);
107
108/* tx */
109void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
110int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq,
112 dma_addr_t addr, u16 len, u8 reset, u8 pad);
113int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
114 struct iwl_tx_queue *txq);
115void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
116 struct ieee80211_tx_info *info);
117int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
118int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
119 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
120int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
121 struct ieee80211_sta *sta, u16 tid);
122int iwl4965_txq_check_empty(struct iwl_priv *priv,
123 int sta_id, u8 tid, int txq_id);
124void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
125 struct iwl_rx_mem_buffer *rxb);
126int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
127void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
128int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
129void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
130void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
131void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
132
133/*
134 * Acquire priv->lock before calling this function !
135 */
136void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
137/**
138 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
139 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
140 * @scd_retry: (1) Indicates queue will be used in aggregation mode
141 *
142 * NOTE: Acquire priv->lock before calling this function !
143 */
144void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
145 struct iwl_tx_queue *txq,
146 int tx_fifo_id, int scd_retry);
147
148static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
149{
150 status &= TX_STATUS_MSK;
151
152 switch (status) {
153 case TX_STATUS_SUCCESS:
154 case TX_STATUS_DIRECT_DONE:
155 return IEEE80211_TX_STAT_ACK;
156 case TX_STATUS_FAIL_DEST_PS:
157 return IEEE80211_TX_STAT_TX_FILTERED;
158 default:
159 return 0;
160 }
161}
162
163static inline bool iwl4965_is_tx_success(u32 status)
164{
165 status &= TX_STATUS_MSK;
166 return (status == TX_STATUS_SUCCESS) ||
167 (status == TX_STATUS_DIRECT_DONE);
168}
169
170u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
171
172/* rx */
173void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
174 struct iwl_rx_mem_buffer *rxb);
175bool iwl4965_good_plcp_health(struct iwl_priv *priv,
176 struct iwl_rx_packet *pkt);
177void iwl4965_rx_statistics(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb);
179void iwl4965_reply_statistics(struct iwl_priv *priv,
180 struct iwl_rx_mem_buffer *rxb);
181
182/* scan */
183int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
184
185/* station mgmt */
186int iwl4965_manage_ibss_station(struct iwl_priv *priv,
187 struct ieee80211_vif *vif, bool add);
188
189/* hcmd */
190int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
191
192#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
193const char *iwl4965_get_tx_fail_reason(u32 status);
194#else
195static inline const char *
196iwl4965_get_tx_fail_reason(u32 status) { return ""; }
197#endif
198
199/* station management */
200int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
201 struct iwl_rxon_context *ctx);
202int iwl4965_add_bssid_station(struct iwl_priv *priv,
203 struct iwl_rxon_context *ctx,
204 const u8 *addr, u8 *sta_id_r);
205int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
206 struct iwl_rxon_context *ctx,
207 struct ieee80211_key_conf *key);
208int iwl4965_set_default_wep_key(struct iwl_priv *priv,
209 struct iwl_rxon_context *ctx,
210 struct ieee80211_key_conf *key);
211int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
212 struct iwl_rxon_context *ctx);
213int iwl4965_set_dynamic_key(struct iwl_priv *priv,
214 struct iwl_rxon_context *ctx,
215 struct ieee80211_key_conf *key, u8 sta_id);
216int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
217 struct iwl_rxon_context *ctx,
218 struct ieee80211_key_conf *key, u8 sta_id);
219void iwl4965_update_tkip_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf,
222 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
223int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
224 int sta_id, int tid);
225int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
226 int tid, u16 ssn);
227int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
228 int tid);
229void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
230 int sta_id, int cnt);
231int iwl4965_update_bcast_stations(struct iwl_priv *priv);
232
233/* rate */
234static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
235{
236 return BIT(ant_idx) << RATE_MCS_ANT_POS;
237}
238
239static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
240{
241 return le32_to_cpu(rate_n_flags) & 0xFF;
242}
243
244static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
245{
246 return cpu_to_le32(flags|(u32)rate);
247}
248
249/* eeprom */
250void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
251int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
252void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
253int iwl4965_eeprom_check_version(struct iwl_priv *priv);
254
255/* mac80211 handlers (for 4965) */
256void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
257int iwl4965_mac_start(struct ieee80211_hw *hw);
258void iwl4965_mac_stop(struct ieee80211_hw *hw);
259void iwl4965_configure_filter(struct ieee80211_hw *hw,
260 unsigned int changed_flags,
261 unsigned int *total_flags,
262 u64 multicast);
263int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
264 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
265 struct ieee80211_key_conf *key);
266void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
267 struct ieee80211_vif *vif,
268 struct ieee80211_key_conf *keyconf,
269 struct ieee80211_sta *sta,
270 u32 iv32, u16 *phase1key);
271int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
272 struct ieee80211_vif *vif,
273 enum ieee80211_ampdu_mlme_action action,
274 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
275 u8 buf_size);
276int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
277 struct ieee80211_vif *vif,
278 struct ieee80211_sta *sta);
279void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
280 struct ieee80211_channel_switch *ch_switch);
281
282#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
new file mode 100644
index 000000000000..17a1d504348e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-commands.h
@@ -0,0 +1,3405 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_legacy_commands_h__
70#define __iwl_legacy_commands_h__
71
72struct iwl_priv;
73
74/* uCode version contains 4 values: Major/Minor/API/Serial */
75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
79
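/*
 * Worked example (illustrative only, not part of the uCode API): for a
 * hypothetical packed version word ver = 0x04080C01 the macros above give
 *
 *	IWL_UCODE_MAJOR(ver)  == 0x04
 *	IWL_UCODE_MINOR(ver)  == 0x08
 *	IWL_UCODE_API(ver)    == 0x0C
 *	IWL_UCODE_SERIAL(ver) == 0x01
 */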
80
81/* Tx rates */
82#define IWL_CCK_RATES 4
83#define IWL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
85
86enum {
87 REPLY_ALIVE = 0x1,
88 REPLY_ERROR = 0x2,
89
90 /* RXON and QOS commands */
91 REPLY_RXON = 0x10,
92 REPLY_RXON_ASSOC = 0x11,
93 REPLY_QOS_PARAM = 0x13,
94 REPLY_RXON_TIMING = 0x14,
95
96 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19,
99
100 /* Security */
101 REPLY_WEPKEY = 0x20,
102
103 /* RX, TX, LEDs */
104 REPLY_3945_RX = 0x1b, /* 3945 only */
105 REPLY_TX = 0x1c,
106 REPLY_RATE_SCALE = 0x47, /* 3945 only */
107 REPLY_LEDS_CMD = 0x48,
108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
109
110 /* 802.11h related */
111 REPLY_CHANNEL_SWITCH = 0x72,
112 CHANNEL_SWITCH_NOTIFICATION = 0x73,
113 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
114 SPECTRUM_MEASURE_NOTIFICATION = 0x75,
115
116 /* Power Management */
117 POWER_TABLE_CMD = 0x77,
118 PM_SLEEP_NOTIFICATION = 0x7A,
119 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
120
121 /* Scan commands and notifications */
122 REPLY_SCAN_CMD = 0x80,
123 REPLY_SCAN_ABORT_CMD = 0x81,
124 SCAN_START_NOTIFICATION = 0x82,
125 SCAN_RESULTS_NOTIFICATION = 0x83,
126 SCAN_COMPLETE_NOTIFICATION = 0x84,
127
128 /* IBSS/AP commands */
129 BEACON_NOTIFICATION = 0x90,
130 REPLY_TX_BEACON = 0x91,
131
132 /* Miscellaneous commands */
133 REPLY_TX_PWR_TABLE_CMD = 0x97,
134
135 /* Bluetooth device coexistence config command */
136 REPLY_BT_CONFIG = 0x9b,
137
138 /* Statistics */
139 REPLY_STATISTICS_CMD = 0x9c,
140 STATISTICS_NOTIFICATION = 0x9d,
141
142 /* RF-KILL commands and notifications */
143 CARD_STATE_NOTIFICATION = 0xa1,
144
145 /* Missed beacons notification */
146 MISSED_BEACONS_NOTIFICATION = 0xa2,
147
148 REPLY_CT_KILL_CONFIG_CMD = 0xa4,
149 SENSITIVITY_CMD = 0xa8,
150 REPLY_PHY_CALIBRATION_CMD = 0xb0,
151 REPLY_RX_PHY_CMD = 0xc0,
152 REPLY_RX_MPDU_CMD = 0xc1,
153 REPLY_RX = 0xc3,
154 REPLY_COMPRESSED_BA = 0xc5,
155
156 REPLY_MAX = 0xff
157};
158
159/******************************************************************************
160 * (0)
161 * Commonly used structures and definitions:
162 * Command header, rate_n_flags, txpower
163 *
164 *****************************************************************************/
165
166/* iwl_cmd_header flags value */
167#define IWL_CMD_FAILED_MSK 0x40
168
169#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
170#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
171#define SEQ_TO_INDEX(s) ((s) & 0xff)
172#define INDEX_TO_SEQ(i) ((i) & 0xff)
173#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
174#define SEQ_RX_FRAME cpu_to_le16(0x8000)
175
176/**
177 * struct iwl_cmd_header
178 *
179 * This header format appears in the beginning of each command sent from the
180 * driver, and each response/notification received from uCode.
181 */
182struct iwl_cmd_header {
183 u8 cmd; /* Command ID: REPLY_RXON, etc. */
184 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
185 /*
186 * The driver sets up the sequence number to values of its choosing.
187 * uCode does not use this value, but passes it back to the driver
188 * when sending the response to each driver-originated command, so
189 * the driver can match the response to the command. Since the values
190 * don't get used by uCode, the driver may set up an arbitrary format.
191 *
192 * There is one exception: uCode sets bit 15 when it originates
193 * the response/notification, i.e. when the response/notification
194 * is not a direct response to a command sent by the driver. For
195 * example, uCode issues REPLY_3945_RX when it sends a received frame
196 * to the driver; it is not a direct response to any driver command.
197 *
198 * The Linux driver uses the following format:
199 *
200 * 0:7 tfd index - position within TX queue
201 * 8:12 TX queue id
202 * 13 reserved
203 * 14 huge - driver sets this to indicate command is in the
204 * 'huge' storage at the end of the command buffers
205 * 15 unsolicited RX or uCode-originated notification
206 */
207 __le16 sequence;
208
209 /* command or response/notification data follows immediately */
210 u8 data[0];
211} __packed;
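/*
 * Minimal sketch (iwl_example_decode_seq is a hypothetical helper, not part
 * of the driver) of how a response's sequence field is decoded with the
 * macros above; the same pattern appears in iwl4965_rx_reply_tx() in
 * iwl-4965.c above.
 */
static inline void iwl_example_decode_seq(const struct iwl_cmd_header *hdr)
{
	u16 sequence = le16_to_cpu(hdr->sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);		/* bits 8:12, Tx queue id */
	int index = SEQ_TO_INDEX(sequence);		/* bits 0:7, TFD index */
	bool huge = !!(hdr->sequence & SEQ_HUGE_FRAME);	/* bit 14 */
	bool notif = !!(hdr->sequence & SEQ_RX_FRAME);	/* bit 15 */

	pr_debug("cmd 0x%x queue %d index %d huge %d unsolicited %d\n",
		 hdr->cmd, txq_id, index, huge, notif);
}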
212
213
214/**
215 * struct iwl3945_tx_power
216 *
217 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
218 *
219 * Each entry contains two values:
220 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
221 * linear value that multiplies the output of the digital signal processor,
222 * before being sent to the analog radio.
223 * 2) Radio gain. This sets the analog gain of the radio Tx path.
224 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
225 *
226 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
227 */
228struct iwl3945_tx_power {
229 u8 tx_gain; /* gain for analog radio */
230 u8 dsp_atten; /* gain for DSP */
231} __packed;
232
233/**
234 * struct iwl3945_power_per_rate
235 *
236 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
237 */
238struct iwl3945_power_per_rate {
239 u8 rate; /* plcp */
240 struct iwl3945_tx_power tpc;
241 u8 reserved;
242} __packed;
243
244/**
245 * iwl4965 rate_n_flags bit fields
246 *
247 * rate_n_flags format is used in following iwl4965 commands:
248 * REPLY_RX (response only)
249 * REPLY_RX_MPDU (response only)
250 * REPLY_TX (both command and response)
251 * REPLY_TX_LINK_QUALITY_CMD
252 *
253 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
254 * 2-0: 0) 6 Mbps
255 * 1) 12 Mbps
256 * 2) 18 Mbps
257 * 3) 24 Mbps
258 * 4) 36 Mbps
259 * 5) 48 Mbps
260 * 6) 54 Mbps
261 * 7) 60 Mbps
262 *
263 * 4-3: 0) Single stream (SISO)
264 * 1) Dual stream (MIMO)
265 * 2) Triple stream (MIMO)
266 *
267 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
268 *
269 * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
270 * 3-0: 0xD) 6 Mbps
271 * 0xF) 9 Mbps
272 * 0x5) 12 Mbps
273 * 0x7) 18 Mbps
274 * 0x9) 24 Mbps
275 * 0xB) 36 Mbps
276 * 0x1) 48 Mbps
277 * 0x3) 54 Mbps
278 *
279 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
280 * 6-0: 10) 1 Mbps
281 * 20) 2 Mbps
282 * 55) 5.5 Mbps
283 * 110) 11 Mbps
284 */
285#define RATE_MCS_CODE_MSK 0x7
286#define RATE_MCS_SPATIAL_POS 3
287#define RATE_MCS_SPATIAL_MSK 0x18
288#define RATE_MCS_HT_DUP_POS 5
289#define RATE_MCS_HT_DUP_MSK 0x20
290
291/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
292#define RATE_MCS_FLAGS_POS 8
293#define RATE_MCS_HT_POS 8
294#define RATE_MCS_HT_MSK 0x100
295
296/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
297#define RATE_MCS_CCK_POS 9
298#define RATE_MCS_CCK_MSK 0x200
299
300/* Bit 10: (1) Use Green Field preamble */
301#define RATE_MCS_GF_POS 10
302#define RATE_MCS_GF_MSK 0x400
303
304/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
305#define RATE_MCS_HT40_POS 11
306#define RATE_MCS_HT40_MSK 0x800
307
308/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
309#define RATE_MCS_DUP_POS 12
310#define RATE_MCS_DUP_MSK 0x1000
311
312/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
313#define RATE_MCS_SGI_POS 13
314#define RATE_MCS_SGI_MSK 0x2000
315
316/**
317 * rate_n_flags Tx antenna masks
318 * 4965 has 2 transmitters
 319 * bits 14:16
320 */
321#define RATE_MCS_ANT_POS 14
322#define RATE_MCS_ANT_A_MSK 0x04000
323#define RATE_MCS_ANT_B_MSK 0x08000
324#define RATE_MCS_ANT_C_MSK 0x10000
325#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
326#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
327#define RATE_ANT_NUM 3
328
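/*
 * Worked examples (illustrative only), built from the bit fields above:
 *
 *   Legacy OFDM 54 Mbps on antenna B:
 *	rate_n_flags = 0x3 | RATE_MCS_ANT_B_MSK
 *	(HT bit 8 clear, CCK bit 9 clear, PLCP nibble 0x3 = 54 Mbps)
 *
 *   HT single-stream MCS 7 (60 Mbps), short GI, antenna A:
 *	rate_n_flags = 0x7 | RATE_MCS_HT_MSK | RATE_MCS_SGI_MSK |
 *		       RATE_MCS_ANT_A_MSK
 */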
329#define POWER_TABLE_NUM_ENTRIES 33
330#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
331#define POWER_TABLE_CCK_ENTRY 32
332
333#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
334#define IWL_PWR_CCK_ENTRIES 2
335
336/**
337 * union iwl4965_tx_power_dual_stream
338 *
339 * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
340 * Use __le32 version (struct tx_power_dual_stream) when building command.
341 *
342 * Driver provides radio gain and DSP attenuation settings to device in pairs,
343 * one value for each transmitter chain. The first value is for transmitter A,
344 * second for transmitter B.
345 *
346 * For SISO bit rates, both values in a pair should be identical.
347 * For MIMO rates, one value may be different from the other,
348 * in order to balance the Tx output between the two transmitters.
349 *
350 * See more details in doc for TXPOWER in iwl-4965-hw.h.
351 */
352union iwl4965_tx_power_dual_stream {
353 struct {
354 u8 radio_tx_gain[2];
355 u8 dsp_predis_atten[2];
356 } s;
357 u32 dw;
358};
359
360/**
361 * struct tx_power_dual_stream
362 *
363 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
364 *
 365 * Same format as union iwl4965_tx_power_dual_stream, but __le32
366 */
367struct tx_power_dual_stream {
368 __le32 dw;
369} __packed;
370
371/**
372 * struct iwl4965_tx_power_db
373 *
374 * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
375 */
376struct iwl4965_tx_power_db {
377 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
378} __packed;
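/*
 * Minimal sketch (hypothetical helper, shown only to illustrate the layout
 * above) of converting the host-format union into one __le32 table entry;
 * both chains get the same gain/attenuation here, as for a SISO rate.
 */
static inline void
iwl4965_example_fill_tx_power(struct iwl4965_tx_power_db *db, int idx,
			      u8 radio_gain, u8 dsp_atten)
{
	union iwl4965_tx_power_dual_stream tp;

	tp.s.radio_tx_gain[0] = radio_gain;	/* transmitter A */
	tp.s.radio_tx_gain[1] = radio_gain;	/* transmitter B */
	tp.s.dsp_predis_atten[0] = dsp_atten;
	tp.s.dsp_predis_atten[1] = dsp_atten;

	db->power_tbl[idx].dw = cpu_to_le32(tp.dw);
}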
379
380/******************************************************************************
381 * (0a)
382 * Alive and Error Commands & Responses:
383 *
384 *****************************************************************************/
385
386#define UCODE_VALID_OK cpu_to_le32(0x1)
387#define INITIALIZE_SUBTYPE (9)
388
389/*
390 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
391 *
392 * uCode issues this "initialize alive" notification once the initialization
393 * uCode image has completed its work, and is ready to load the runtime image.
394 * This is the *first* "alive" notification that the driver will receive after
395 * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
396 *
397 * See comments documenting "BSM" (bootstrap state machine).
398 *
399 * For 4965, this notification contains important calibration data for
400 * calculating txpower settings:
401 *
402 * 1) Power supply voltage indication. The voltage sensor outputs higher
 403 * values for lower voltage, and vice versa.
404 *
405 * 2) Temperature measurement parameters, for each of two channel widths
406 * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
407 * is done via one of the receiver chains, and channel width influences
408 * the results.
409 *
410 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
411 * for each of 5 frequency ranges.
412 */
413struct iwl_init_alive_resp {
414 u8 ucode_minor;
415 u8 ucode_major;
416 __le16 reserved1;
417 u8 sw_rev[8];
418 u8 ver_type;
419 u8 ver_subtype; /* "9" for initialize alive */
420 __le16 reserved2;
421 __le32 log_event_table_ptr;
422 __le32 error_event_table_ptr;
423 __le32 timestamp;
424 __le32 is_valid;
425
426 /* calibration values from "initialize" uCode */
427 __le32 voltage; /* signed, higher value is lower voltage */
428 __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */
429 __le32 therm_r2[2]; /* signed */
430 __le32 therm_r3[2]; /* signed */
431 __le32 therm_r4[2]; /* signed */
432 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
433 * 2 Tx chains */
434} __packed;
435
436
437/**
438 * REPLY_ALIVE = 0x1 (response only, not a command)
439 *
440 * uCode issues this "alive" notification once the runtime image is ready
441 * to receive commands from the driver. This is the *second* "alive"
442 * notification that the driver will receive after rebooting uCode;
443 * this "alive" is indicated by subtype field != 9.
444 *
445 * See comments documenting "BSM" (bootstrap state machine).
446 *
447 * This response includes two pointers to structures within the device's
448 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
449 *
450 * 1) log_event_table_ptr indicates base of the event log. This traces
451 * a 256-entry history of uCode execution within a circular buffer.
452 * Its header format is:
453 *
454 * __le32 log_size; log capacity (in number of entries)
455 * __le32 type; (1) timestamp with each entry, (0) no timestamp
456 * __le32 wraps; # times uCode has wrapped to top of circular buffer
457 * __le32 write_index; next circular buffer entry that uCode would fill
458 *
459 * The header is followed by the circular buffer of log entries. Entries
460 * with timestamps have the following format:
461 *
462 * __le32 event_id; range 0 - 1500
463 * __le32 timestamp; low 32 bits of TSF (of network, if associated)
464 * __le32 data; event_id-specific data value
465 *
466 * Entries without timestamps contain only event_id and data.
467 *
468 *
469 * 2) error_event_table_ptr indicates base of the error log. This contains
470 * information about any uCode error that occurs. For 4965, the format
471 * of the error log is:
472 *
473 * __le32 valid; (nonzero) valid, (0) log is empty
474 * __le32 error_id; type of error
475 * __le32 pc; program counter
476 * __le32 blink1; branch link
477 * __le32 blink2; branch link
478 * __le32 ilink1; interrupt link
479 * __le32 ilink2; interrupt link
480 * __le32 data1; error-specific data
481 * __le32 data2; error-specific data
482 * __le32 line; source code line of error
483 * __le32 bcon_time; beacon timer
484 * __le32 tsf_low; network timestamp function timer
485 * __le32 tsf_hi; network timestamp function timer
486 * __le32 gp1; GP1 timer register
487 * __le32 gp2; GP2 timer register
488 * __le32 gp3; GP3 timer register
489 * __le32 ucode_ver; uCode version
490 * __le32 hw_ver; HW Silicon version
491 * __le32 brd_ver; HW board version
492 * __le32 log_pc; log program counter
493 * __le32 frame_ptr; frame pointer
494 * __le32 stack_ptr; stack pointer
495 * __le32 hcmd; last host command
496 * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
497 * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
498 * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
499 * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
500 * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
501 * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
502 * __le32 wait_event; wait event() caller address
503 * __le32 l2p_control; L2pControlField
504 * __le32 l2p_duration; L2pDurationField
505 * __le32 l2p_mhvalid; L2pMhValidBits
506 * __le32 l2p_addr_match; L2pAddrMatchStat
 507 * __le32 lmpm_pmg_sel; indicates which clocks are turned on (LMPM_PMG_SEL)
 508 * __le32 u_timestamp; indicates the date and time of the compilation
509 * __le32 reserved;
510 *
511 * The Linux driver can print both logs to the system log when a uCode error
512 * occurs.
513 */
514struct iwl_alive_resp {
515 u8 ucode_minor;
516 u8 ucode_major;
517 __le16 reserved1;
518 u8 sw_rev[8];
519 u8 ver_type;
520 u8 ver_subtype; /* not "9" for runtime alive */
521 __le16 reserved2;
522 __le32 log_event_table_ptr; /* SRAM address for event log */
523 __le32 error_event_table_ptr; /* SRAM address for error log */
524 __le32 timestamp;
525 __le32 is_valid;
526} __packed;
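/*
 * The event log layout described above, re-stated as hypothetical C structs
 * purely for illustration (these types are not part of the uCode API
 * definitions in this file; the driver reads the fields individually via
 * HBUS_TARG_MEM_* accesses):
 */
struct iwl_example_event_log_hdr {
	__le32 log_size;	/* capacity, in number of entries */
	__le32 type;		/* (1) timestamped entries, (0) no timestamp */
	__le32 wraps;		/* # of times the circular buffer wrapped */
	__le32 write_index;	/* next entry uCode would fill */
} __packed;

struct iwl_example_event_log_entry {
	__le32 event_id;	/* 0 - 1500 */
	__le32 timestamp;	/* low 32 bits of TSF; omitted if type == 0 */
	__le32 data;		/* event_id-specific value */
} __packed;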
527
528/*
529 * REPLY_ERROR = 0x2 (response only, not a command)
530 */
531struct iwl_error_resp {
532 __le32 error_type;
533 u8 cmd_id;
534 u8 reserved1;
535 __le16 bad_cmd_seq_num;
536 __le32 error_info;
537 __le64 timestamp;
538} __packed;
539
540/******************************************************************************
541 * (1)
542 * RXON Commands & Responses:
543 *
544 *****************************************************************************/
545
546/*
547 * Rx config defines & structure
548 */
549/* rx_config device types */
550enum {
551 RXON_DEV_TYPE_AP = 1,
552 RXON_DEV_TYPE_ESS = 3,
553 RXON_DEV_TYPE_IBSS = 4,
554 RXON_DEV_TYPE_SNIFFER = 6,
555};
556
557
558#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
559#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
560#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
561#define RXON_RX_CHAIN_VALID_POS (1)
562#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
563#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
564#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
565#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
566#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
567#define RXON_RX_CHAIN_CNT_POS (10)
568#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
569#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
570#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
571#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
572
573/* rx_config flags */
574/* band & modulation selection */
575#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
576#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
577/* auto detection enable */
578#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
579/* TGg protection when tx */
580#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
581/* cck short slot & preamble */
582#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
583#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
584/* antenna selection */
585#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
586#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
587#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
588#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
589/* radar detection enable */
590#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
591#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
592/* rx response to host with 8-byte TSF
593* (according to ON_AIR deassertion) */
594#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
595
596
597/* HT flags */
598#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
599#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
600
601#define RXON_FLG_HT_OPERATING_MODE_POS (23)
602
603#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23)
604#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23)
605
606#define RXON_FLG_CHANNEL_MODE_POS (25)
607#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
608
609/* channel mode */
610enum {
611 CHANNEL_MODE_LEGACY = 0,
612 CHANNEL_MODE_PURE_40 = 1,
613 CHANNEL_MODE_MIXED = 2,
614 CHANNEL_MODE_RESERVED = 3,
615};
616#define RXON_FLG_CHANNEL_MODE_LEGACY \
617 cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
618#define RXON_FLG_CHANNEL_MODE_PURE_40 \
619 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
620#define RXON_FLG_CHANNEL_MODE_MIXED \
621 cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
622
623/* CTS to self (if spec allows) flag */
624#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
625
626/* rx_config filter flags */
627/* accept all data frames */
628#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
629/* pass control & management to host */
630#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
631/* accept multi-cast */
632#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
633/* don't decrypt uni-cast frames */
634#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
635/* don't decrypt multi-cast frames */
636#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
637/* STA is associated */
638#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
 639/* pass non-BSSID beacons to the host while associated */
640#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
641
642/**
643 * REPLY_RXON = 0x10 (command, has simple generic response)
644 *
645 * RXON tunes the radio tuner to a service channel, and sets up a number
646 * of parameters that are used primarily for Rx, but also for Tx operations.
647 *
648 * NOTE: When tuning to a new channel, driver must set the
649 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
650 * info within the device, including the station tables, tx retry
651 * rate tables, and txpower tables. Driver must build a new station
652 * table and txpower table before transmitting anything on the RXON
653 * channel.
654 *
655 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
656 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
657 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
658 */
659
660struct iwl3945_rxon_cmd {
661 u8 node_addr[6];
662 __le16 reserved1;
663 u8 bssid_addr[6];
664 __le16 reserved2;
665 u8 wlap_bssid_addr[6];
666 __le16 reserved3;
667 u8 dev_type;
668 u8 air_propagation;
669 __le16 reserved4;
670 u8 ofdm_basic_rates;
671 u8 cck_basic_rates;
672 __le16 assoc_id;
673 __le32 flags;
674 __le32 filter_flags;
675 __le16 channel;
676 __le16 reserved5;
677} __packed;
678
679struct iwl4965_rxon_cmd {
680 u8 node_addr[6];
681 __le16 reserved1;
682 u8 bssid_addr[6];
683 __le16 reserved2;
684 u8 wlap_bssid_addr[6];
685 __le16 reserved3;
686 u8 dev_type;
687 u8 air_propagation;
688 __le16 rx_chain;
689 u8 ofdm_basic_rates;
690 u8 cck_basic_rates;
691 __le16 assoc_id;
692 __le32 flags;
693 __le32 filter_flags;
694 __le16 channel;
695 u8 ofdm_ht_single_stream_basic_rates;
696 u8 ofdm_ht_dual_stream_basic_rates;
697} __packed;
698
 699/* Common rxon cmd layout; it is typecast into the 3945- or 4965-specific
 700 * rxon cmd, depending on the caller.
 701 */
702struct iwl_legacy_rxon_cmd {
703 u8 node_addr[6];
704 __le16 reserved1;
705 u8 bssid_addr[6];
706 __le16 reserved2;
707 u8 wlap_bssid_addr[6];
708 __le16 reserved3;
709 u8 dev_type;
710 u8 air_propagation;
711 __le16 rx_chain;
712 u8 ofdm_basic_rates;
713 u8 cck_basic_rates;
714 __le16 assoc_id;
715 __le32 flags;
716 __le32 filter_flags;
717 __le16 channel;
718 u8 ofdm_ht_single_stream_basic_rates;
719 u8 ofdm_ht_dual_stream_basic_rates;
720 u8 reserved4;
721 u8 reserved5;
722} __packed;
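/*
 * Minimal sketch of the typecast mentioned above (iwl_example_rxon_as_4965
 * is a hypothetical helper; the driver keeps such a common command in its
 * context structure and casts it when talking to 4965-era devices):
 */
static inline struct iwl4965_rxon_cmd *
iwl_example_rxon_as_4965(struct iwl_legacy_rxon_cmd *rxon)
{
	/* The leading fields of the two layouts match, so the cast is safe. */
	return (struct iwl4965_rxon_cmd *)rxon;
}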
723
724
725/*
726 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
727 */
728struct iwl3945_rxon_assoc_cmd {
729 __le32 flags;
730 __le32 filter_flags;
731 u8 ofdm_basic_rates;
732 u8 cck_basic_rates;
733 __le16 reserved;
734} __packed;
735
736struct iwl4965_rxon_assoc_cmd {
737 __le32 flags;
738 __le32 filter_flags;
739 u8 ofdm_basic_rates;
740 u8 cck_basic_rates;
741 u8 ofdm_ht_single_stream_basic_rates;
742 u8 ofdm_ht_dual_stream_basic_rates;
743 __le16 rx_chain_select_flags;
744 __le16 reserved;
745} __packed;
746
747#define IWL_CONN_MAX_LISTEN_INTERVAL 10
748#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
749#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
750
751/*
752 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
753 */
754struct iwl_rxon_time_cmd {
755 __le64 timestamp;
756 __le16 beacon_interval;
757 __le16 atim_window;
758 __le32 beacon_init_val;
759 __le16 listen_interval;
760 u8 dtim_period;
761 u8 delta_cp_bss_tbtts;
762} __packed;
763
764/*
765 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
766 */
767struct iwl3945_channel_switch_cmd {
768 u8 band;
769 u8 expect_beacon;
770 __le16 channel;
771 __le32 rxon_flags;
772 __le32 rxon_filter_flags;
773 __le32 switch_time;
774 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
775} __packed;
776
777struct iwl4965_channel_switch_cmd {
778 u8 band;
779 u8 expect_beacon;
780 __le16 channel;
781 __le32 rxon_flags;
782 __le32 rxon_filter_flags;
783 __le32 switch_time;
784 struct iwl4965_tx_power_db tx_power;
785} __packed;
786
787/*
788 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
789 */
790struct iwl_csa_notification {
791 __le16 band;
792 __le16 channel;
793 __le32 status; /* 0 - OK, 1 - fail */
794} __packed;
795
796/******************************************************************************
797 * (2)
798 * Quality-of-Service (QOS) Commands & Responses:
799 *
800 *****************************************************************************/
801
802/**
803 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
804 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
805 *
806 * @cw_min: Contention window, start value in numbers of slots.
807 * Should be a power-of-2, minus 1. Device's default is 0x0f.
808 * @cw_max: Contention window, max value in numbers of slots.
809 * Should be a power-of-2, minus 1. Device's default is 0x3f.
810 * @aifsn: Number of slots in Arbitration Interframe Space (before
811 * performing random backoff timing prior to Tx). Device default 1.
812 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
813 *
 814 * Device will automatically increase the contention window to (2*CW) + 1 for each
815 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
816 * value, to cap the CW value.
817 */
818struct iwl_ac_qos {
819 __le16 cw_min;
820 __le16 cw_max;
821 u8 aifsn;
822 u8 reserved1;
823 __le16 edca_txop;
824} __packed;
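/*
 * Worked example (illustrative): with the device defaults cw_min = 0x0f and
 * cw_max = 0x3f, successive retries grow the contention window as
 *
 *	0x0f -> (2*0x0f)+1 = 0x1f -> (2*0x1f)+1 = 0x3f
 *	     -> ((2*0x3f)+1) & cw_max = 0x7f & 0x3f = 0x3f   (capped)
 */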
825
826/* QoS flags defines */
827#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
828#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
829#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
830
831/* Number of Access Categories (AC) (EDCA), queues 0..3 */
832#define AC_NUM 4
833
834/*
835 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
836 *
837 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
838 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
839 */
840struct iwl_qosparam_cmd {
841 __le32 qos_flags;
842 struct iwl_ac_qos ac[AC_NUM];
843} __packed;
844
845/******************************************************************************
846 * (3)
847 * Add/Modify Stations Commands & Responses:
848 *
849 *****************************************************************************/
850/*
851 * Multi station support
852 */
853
854/* Special, dedicated locations within device's station table */
855#define IWL_AP_ID 0
856#define IWL_STA_ID 2
857#define IWL3945_BROADCAST_ID 24
858#define IWL3945_STATION_COUNT 25
859#define IWL4965_BROADCAST_ID 31
860#define IWL4965_STATION_COUNT 32
861
862#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
863#define IWL_INVALID_STATION 255
864
865#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
866#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
867#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
868#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
869#define STA_FLG_MAX_AGG_SIZE_POS (19)
870#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19)
871#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21)
872#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22)
873#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
874#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23)
875
876/* Use in mode field. 1: modify existing entry, 0: add new station entry */
877#define STA_CONTROL_MODIFY_MSK 0x01
878
879/* key flags __le16*/
880#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
881#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
882#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
883#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
884#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
885
886#define STA_KEY_FLG_KEYID_POS 8
887#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
888/* wep key is either from global key (0) or from station info array (1) */
889#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008)
890
891/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
892#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
893#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
894#define STA_KEY_MAX_NUM 8
895
896/* Flags indicate whether to modify vs. don't change various station params */
897#define STA_MODIFY_KEY_MASK 0x01
898#define STA_MODIFY_TID_DISABLE_TX 0x02
899#define STA_MODIFY_TX_RATE_MSK 0x04
900#define STA_MODIFY_ADDBA_TID_MSK 0x08
901#define STA_MODIFY_DELBA_TID_MSK 0x10
902#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
903
904/* Receiver address (actually, Rx station's index into station table),
905 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
906#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
907
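/* Worked example (illustrative): BUILD_RAxTID(3, 5) == (3 << 4) + 5 == 0x35 */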
908struct iwl4965_keyinfo {
909 __le16 key_flags;
910 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
911 u8 reserved1;
912 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
913 u8 key_offset;
914 u8 reserved2;
915 u8 key[16]; /* 16-byte unicast decryption key */
916} __packed;
917
918/**
919 * struct sta_id_modify
920 * @addr[ETH_ALEN]: station's MAC address
921 * @sta_id: index of station in uCode's station table
922 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
923 *
924 * Driver selects unused table index when adding new station,
925 * or the index to a pre-existing station entry when modifying that station.
926 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
927 *
928 * modify_mask flags select which parameters to modify vs. leave alone.
929 */
930struct sta_id_modify {
931 u8 addr[ETH_ALEN];
932 __le16 reserved1;
933 u8 sta_id;
934 u8 modify_mask;
935 __le16 reserved2;
936} __packed;
937
938/*
939 * REPLY_ADD_STA = 0x18 (command)
940 *
941 * The device contains an internal table of per-station information,
942 * with info on security keys, aggregation parameters, and Tx rates for
943 * initial Tx attempt and any retries (4965 devices uses
944 * REPLY_TX_LINK_QUALITY_CMD,
945 * 3945 uses REPLY_RATE_SCALE to set up rate tables).
946 *
947 * REPLY_ADD_STA sets up the table entry for one station, either creating
948 * a new entry, or modifying a pre-existing one.
949 *
950 * NOTE: RXON command (without "associated" bit set) wipes the station table
951 * clean. Moving into RF_KILL state does this also. Driver must set up
952 * new station table before transmitting anything on the RXON channel
953 * (except active scans or active measurements; those commands carry
954 * their own txpower/rate setup data).
955 *
956 * When getting started on a new channel, driver must set up the
957 * IWL_BROADCAST_ID entry (last entry in the table). For a client
958 * station in a BSS, once an AP is selected, driver sets up the AP STA
959 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
960 * are all that are needed for a BSS client station. If the device is
961 * used as AP, or in an IBSS network, driver must set up station table
962 * entries for all STAs in network, starting with index IWL_STA_ID.
963 */
964
965struct iwl3945_addsta_cmd {
966 u8 mode; /* 1: modify existing, 0: add new station */
967 u8 reserved[3];
968 struct sta_id_modify sta;
969 struct iwl4965_keyinfo key;
970 __le32 station_flags; /* STA_FLG_* */
971 __le32 station_flags_msk; /* STA_FLG_* */
972
973 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
974 * corresponding to bit (e.g. bit 5 controls TID 5).
975 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
976 __le16 tid_disable_tx;
977
978 __le16 rate_n_flags;
979
980 /* TID for which to add block-ack support.
981 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
982 u8 add_immediate_ba_tid;
983
984 /* TID for which to remove block-ack support.
985 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
986 u8 remove_immediate_ba_tid;
987
988 /* Starting Sequence Number for added block-ack support.
989 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
990 __le16 add_immediate_ba_ssn;
991} __packed;
992
993struct iwl4965_addsta_cmd {
994 u8 mode; /* 1: modify existing, 0: add new station */
995 u8 reserved[3];
996 struct sta_id_modify sta;
997 struct iwl4965_keyinfo key;
998 __le32 station_flags; /* STA_FLG_* */
999 __le32 station_flags_msk; /* STA_FLG_* */
1000
1001 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
1002 * corresponding to bit (e.g. bit 5 controls TID 5).
1003 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1004 __le16 tid_disable_tx;
1005
1006 __le16 reserved1;
1007
1008 /* TID for which to add block-ack support.
1009 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1010 u8 add_immediate_ba_tid;
1011
1012 /* TID for which to remove block-ack support.
1013 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1014 u8 remove_immediate_ba_tid;
1015
1016 /* Starting Sequence Number for added block-ack support.
1017 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1018 __le16 add_immediate_ba_ssn;
1019
1020 /*
1021 * Number of packets OK to transmit to station even though
1022 * it is asleep -- used to synchronise PS-poll and u-APSD
1023 * responses while ucode keeps track of STA sleep state.
1024 */
1025 __le16 sleep_tx_count;
1026
1027 __le16 reserved2;
1028} __packed;
1029
1030/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
1031struct iwl_legacy_addsta_cmd {
1032 u8 mode; /* 1: modify existing, 0: add new station */
1033 u8 reserved[3];
1034 struct sta_id_modify sta;
1035 struct iwl4965_keyinfo key;
1036 __le32 station_flags; /* STA_FLG_* */
1037 __le32 station_flags_msk; /* STA_FLG_* */
1038
1039 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
1040 * corresponding to bit (e.g. bit 5 controls TID 5).
1041 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1042 __le16 tid_disable_tx;
1043
1044 __le16 rate_n_flags; /* 3945 only */
1045
1046 /* TID for which to add block-ack support.
1047 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1048 u8 add_immediate_ba_tid;
1049
1050 /* TID for which to remove block-ack support.
1051 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1052 u8 remove_immediate_ba_tid;
1053
1054 /* Starting Sequence Number for added block-ack support.
1055 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1056 __le16 add_immediate_ba_ssn;
1057
1058 /*
1059 * Number of packets OK to transmit to station even though
1060 * it is asleep -- used to synchronise PS-poll and u-APSD
1061 * responses while ucode keeps track of STA sleep state.
1062 */
1063 __le16 sleep_tx_count;
1064
1065 __le16 reserved2;
1066} __packed;
1067
1068
1069#define ADD_STA_SUCCESS_MSK 0x1
1070#define ADD_STA_NO_ROOM_IN_TABLE 0x2
1071#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
1072#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
1073/*
1074 * REPLY_ADD_STA = 0x18 (response)
1075 */
1076struct iwl_add_sta_resp {
1077 u8 status; /* ADD_STA_* */
1078} __packed;
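/*
 * Illustrative sketch only (not part of the uCode API): one way a driver
 * might fill the wrapper command for a new broadcast-station entry, and
 * check the response status. The helper names, the station index argument
 * and the assumption that the caller has pre-zeroed the command are all
 * hypothetical; real code derives these values from its station table.
 */
static inline void
iwl_legacy_sketch_fill_bcast_sta(struct iwl_legacy_addsta_cmd *cmd,
				 const u8 *bcast_addr, u8 sta_idx)
{
	cmd->mode = 0;				/* add, don't modify */
	memcpy(cmd->sta.addr, bcast_addr, ETH_ALEN);
	cmd->sta.sta_id = sta_idx;		/* e.g. last table entry */
	cmd->sta.modify_mask = 0;		/* nothing selectively modified */
	cmd->station_flags = 0;
	cmd->station_flags_msk = 0;
	cmd->tid_disable_tx = cpu_to_le16(0);	/* Tx enabled on all TIDs */
}

static inline bool
iwl_legacy_sketch_addsta_ok(const struct iwl_add_sta_resp *resp)
{
	return resp->status == ADD_STA_SUCCESS_MSK;
}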
1079
1080#define REM_STA_SUCCESS_MSK 0x1
1081/*
1082 * REPLY_REM_STA = 0x19 (response)
1083 */
1084struct iwl_rem_sta_resp {
1085 u8 status;
1086} __packed;
1087
1088/*
1089 * REPLY_REM_STA = 0x19 (command)
1090 */
1091struct iwl_rem_sta_cmd {
1092 u8 num_sta; /* number of stations to remove */
1093 u8 reserved[3];
1094 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
1095 u8 reserved2[2];
1096} __packed;
1097
1098#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
1099#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
1100#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
1101#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
1102#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1103
1104#define IWL_DROP_SINGLE 0
1105#define IWL_DROP_SELECTED 1
1106#define IWL_DROP_ALL 2
1107
1108/*
1109 * REPLY_WEP_KEY = 0x20
1110 */
1111struct iwl_wep_key {
1112 u8 key_index;
1113 u8 key_offset;
1114 u8 reserved1[2];
1115 u8 key_size;
1116 u8 reserved2[3];
1117 u8 key[16];
1118} __packed;
1119
1120struct iwl_wep_cmd {
1121 u8 num_keys;
1122 u8 global_key_type;
1123 u8 flags;
1124 u8 reserved;
1125 struct iwl_wep_key key[0];
1126} __packed;
1127
1128#define WEP_KEY_WEP_TYPE 1
1129#define WEP_KEYS_MAX 4
1130#define WEP_INVALID_OFFSET 0xff
1131#define WEP_KEY_LEN_64 5
1132#define WEP_KEY_LEN_128 13
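/*
 * Illustrative sketch only: filling a single-entry REPLY_WEP_KEY command
 * for a 40-bit default (global) key. The helper name and the choice of
 * key_offset == key_index are assumptions made for the example.
 */
static inline void
iwl_legacy_sketch_fill_wep_cmd(struct iwl_wep_cmd *wep_cmd,
			       const u8 *key_data, u8 idx)
{
	struct iwl_wep_key *k = &wep_cmd->key[0];

	wep_cmd->num_keys = 1;
	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->flags = 0;

	k->key_index = idx;			/* 0 .. WEP_KEYS_MAX - 1 */
	k->key_offset = idx;
	k->key_size = WEP_KEY_LEN_64;		/* 5-byte (40-bit) key */
	memcpy(k->key, key_data, WEP_KEY_LEN_64);
}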
1133
1134/******************************************************************************
1135 * (4)
1136 * Rx Responses:
1137 *
1138 *****************************************************************************/
1139
1140#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
1141#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
1142
1143#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
1144#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
1145#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
1146#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
1147#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
1148#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
1149
1150#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
1151#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
1152#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
1153#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
1154#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
1155#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8)
1156
1157#define RX_RES_STATUS_STATION_FOUND (1<<6)
1158#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
1159
1160#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
1161#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
1162#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
1163#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
1164#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
1165
1166#define RX_MPDU_RES_STATUS_ICV_OK (0x20)
1167#define RX_MPDU_RES_STATUS_MIC_OK (0x40)
1168#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1169#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
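/*
 * Illustrative sketch only: how the decryption status bits above might be
 * checked on the CPU-order Rx status word. A frame is accepted if it was
 * not encrypted, or if the hardware reports a clean decrypt.
 */
static inline bool
iwl_legacy_sketch_decrypt_ok(u32 rx_status)
{
	if ((rx_status & RX_RES_STATUS_SEC_TYPE_MSK) ==
	     RX_RES_STATUS_SEC_TYPE_NONE)
		return true;
	return (rx_status & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		RX_RES_STATUS_DECRYPT_OK;
}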
1170
1171
1172struct iwl3945_rx_frame_stats {
1173 u8 phy_count;
1174 u8 id;
1175 u8 rssi;
1176 u8 agc;
1177 __le16 sig_avg;
1178 __le16 noise_diff;
1179 u8 payload[0];
1180} __packed;
1181
1182struct iwl3945_rx_frame_hdr {
1183 __le16 channel;
1184 __le16 phy_flags;
1185 u8 reserved1;
1186 u8 rate;
1187 __le16 len;
1188 u8 payload[0];
1189} __packed;
1190
1191struct iwl3945_rx_frame_end {
1192 __le32 status;
1193 __le64 timestamp;
1194 __le32 beacon_timestamp;
1195} __packed;
1196
1197/*
1198 * REPLY_3945_RX = 0x1b (response only, not a command)
1199 *
1200 * NOTE: DO NOT dereference from casts to this structure
1201 * It is provided only for calculating minimum data set size.
1202 * The actual offsets of the hdr and end are dynamic based on
1203 * stats.phy_count
1204 */
1205struct iwl3945_rx_frame {
1206 struct iwl3945_rx_frame_stats stats;
1207 struct iwl3945_rx_frame_hdr hdr;
1208 struct iwl3945_rx_frame_end end;
1209} __packed;
1210
1211#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
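/*
 * Illustrative sketch only: locating the variable-offset pieces of a
 * REPLY_3945_RX packet, per the NOTE above. The hdr follows phy_count
 * bytes of phy data after the stats block, and the end block follows
 * the received frame itself.
 */
static inline struct iwl3945_rx_frame_hdr *
iwl3945_sketch_rx_hdr(struct iwl3945_rx_frame_stats *stats)
{
	return (struct iwl3945_rx_frame_hdr *)(stats->payload +
					       stats->phy_count);
}

static inline struct iwl3945_rx_frame_end *
iwl3945_sketch_rx_end(struct iwl3945_rx_frame_hdr *hdr)
{
	return (struct iwl3945_rx_frame_end *)(hdr->payload +
					       le16_to_cpu(hdr->len));
}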
1212
1213/* Fixed (non-configurable) rx data from phy */
1214
1215#define IWL49_RX_RES_PHY_CNT 14
1216#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1217#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1218#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1219#define IWL49_AGC_DB_POS (7)
1220struct iwl4965_rx_non_cfg_phy {
1221 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1222 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1223 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1224 u8 pad[0];
1225} __packed;
1226
1227
1228/*
1229 * REPLY_RX = 0xc3 (response only, not a command)
1230 * Used only for legacy (non 11n) frames.
1231 */
1232struct iwl_rx_phy_res {
1233 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1234 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1235 u8 stat_id; /* configurable DSP phy data set ID */
1236 u8 reserved1;
1237 __le64 timestamp; /* TSF at on air rise */
1238 __le32 beacon_time_stamp; /* beacon at on-air rise */
1239 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1240 __le16 channel; /* channel number */
1241 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1242 __le32 rate_n_flags; /* RATE_MCS_* */
1243 __le16 byte_count; /* frame's byte-count */
1244 __le16 frame_time; /* frame's time on the air */
1245} __packed;
1246
1247struct iwl_rx_mpdu_res_start {
1248 __le16 byte_count;
1249 __le16 reserved;
1250} __packed;
1251
1252
1253/******************************************************************************
1254 * (5)
1255 * Tx Commands & Responses:
1256 *
1257 * Driver must place each REPLY_TX command into one of the prioritized Tx
1258 * queues in host DRAM, shared between driver and device (see comments for
1259 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1260 * are preparing to transmit, the device pulls the Tx command over the PCI
1261 * bus via one of the device's Tx DMA channels, to fill an internal FIFO
1262 * from which data will be transmitted.
1263 *
1264 * uCode handles all timing and protocol related to control frames
1265 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1266 * handle reception of block-acks; uCode updates the host driver via
1267 * REPLY_COMPRESSED_BA.
1268 *
1269 * uCode handles retrying Tx when an ACK is expected but not received.
1270 * This includes trying lower data rates than the one requested in the Tx
1271 * command, as set up by the REPLY_RATE_SCALE (for 3945) or
1272 * REPLY_TX_LINK_QUALITY_CMD (4965).
1273 *
1274 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1275 * This command must be executed after every RXON command, before Tx can occur.
1276 *****************************************************************************/
1277
1278/* REPLY_TX Tx flags field */
1279
1280/*
1281 * 1: Use Request-To-Send protocol before this frame.
1282 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
1283 */
1284#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
1285
1286/*
1287 * 1: Transmit Clear-To-Send to self before this frame.
1288 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
1289 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
1290 */
1291#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
1292
1293/* 1: Expect ACK from receiving station
1294 * 0: Don't expect ACK (MAC header's duration field s/b 0)
1295 * Set this for unicast frames, but not broadcast/multicast. */
1296#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1297
1298/* For 4965 devices:
1299 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
1300 * Tx command's initial_rate_index indicates first rate to try;
1301 * uCode walks through table for additional Tx attempts.
1302 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
1303 * This rate will be used for all Tx attempts; it will not be scaled. */
1304#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
1305
1306/* 1: Expect immediate block-ack.
1307 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
1308#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
1309
1310/*
1311 * 1: Frame requires full Tx-Op protection.
1312 * Set this if either RTS or CTS Tx Flag gets set.
1313 */
1314#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
1315
1316/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
1317 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
1318#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
1319#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
1320#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
1321
1322/* 1: uCode overrides sequence control field in MAC header.
1323 * 0: Driver provides sequence control field in MAC header.
1324 * Set this for management frames, non-QOS data frames, non-unicast frames,
1325 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
1326#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
1327
1328/* 1: This frame is non-last MPDU; more fragments are coming.
1329 * 0: Last fragment, or not using fragmentation. */
1330#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
1331
1332/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
1333 * 0: No TSF required in outgoing frame.
1334 * Set this for transmitting beacons and probe responses. */
1335#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
1336
1337/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
1338 * alignment of frame's payload data field.
1339 * 0: No pad
1340 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
1341 * field (but not both). Driver must align frame data (i.e. data following
1342 * MAC header) to DWORD boundary. */
1343#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
1344
1345/* accelerate aggregation support
1346 * 0 - no CCMP encryption; 1 - CCMP encryption */
1347#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
1348
1349/* HCCA-AP - disable duration overwriting. */
1350#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
1351
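/*
 * Illustrative sketch only: combining the Tx flags above for a simple
 * data frame. A real driver also handles sequence control, TSF and
 * MAC-header padding flags; the helper name and arguments are
 * placeholders for the example.
 */
static inline __le32
iwl_legacy_sketch_tx_flags(bool unicast, bool use_rts, bool more_frags)
{
	__le32 tx_flags = 0;

	if (unicast)
		tx_flags |= TX_CMD_FLG_ACK_MSK;		/* expect an ACK */
	if (use_rts)		/* RTS and CTS-to-self are mutually exclusive */
		tx_flags |= TX_CMD_FLG_RTS_MSK |
			    TX_CMD_FLG_FULL_TXOP_PROT_MSK;
	if (more_frags)
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	return tx_flags;
}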
1352
1353/*
1354 * TX command security control
1355 */
1356#define TX_CMD_SEC_WEP 0x01
1357#define TX_CMD_SEC_CCM 0x02
1358#define TX_CMD_SEC_TKIP 0x03
1359#define TX_CMD_SEC_MSK 0x03
1360#define TX_CMD_SEC_SHIFT 6
1361#define TX_CMD_SEC_KEY128 0x08
1362
1363/*
1364 * security overhead sizes
1365 */
1366#define WEP_IV_LEN 4
1367#define WEP_ICV_LEN 4
1368#define CCMP_MIC_LEN 8
1369#define TKIP_ICV_LEN 4
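/*
 * Illustrative sketch only: building a sec_ctl byte from the values above
 * for WEP. The key index is carried in the bits above TX_CMD_SEC_SHIFT,
 * and 104-bit keys additionally set TX_CMD_SEC_KEY128; CCMP and TKIP just
 * select the cipher. The helper name and arguments are placeholders.
 */
static inline u8
iwl_legacy_sketch_sec_ctl_wep(u8 key_idx, bool is_104_bit)
{
	u8 sec_ctl = TX_CMD_SEC_WEP |
		     ((key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

	if (is_104_bit)
		sec_ctl |= TX_CMD_SEC_KEY128;
	return sec_ctl;
}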
1370
1371/*
1372 * REPLY_TX = 0x1c (command)
1373 */
1374
1375struct iwl3945_tx_cmd {
1376 /*
1377 * MPDU byte count:
1378 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1379 * + 8 byte IV for CCM or TKIP (not used for WEP)
1380 * + Data payload
1381 * + 8-byte MIC (not used for CCM/WEP)
1382 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1383 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1384 * Range: 14-2342 bytes.
1385 */
1386 __le16 len;
1387
1388 /*
1389 * MPDU or MSDU byte count for next frame.
1390 * Used for fragmentation and bursting, but not 11n aggregation.
1391 * Same as "len", but for next frame. Set to 0 if not applicable.
1392 */
1393 __le16 next_frame_len;
1394
1395 __le32 tx_flags; /* TX_CMD_FLG_* */
1396
1397 u8 rate;
1398
1399 /* Index of recipient station in uCode's station table */
1400 u8 sta_id;
1401 u8 tid_tspec;
1402 u8 sec_ctl;
1403 u8 key[16];
1404 union {
1405 u8 byte[8];
1406 __le16 word[4];
1407 __le32 dw[2];
1408 } tkip_mic;
1409 __le32 next_frame_info;
1410 union {
1411 __le32 life_time;
1412 __le32 attempt;
1413 } stop_time;
1414 u8 supp_rates[2];
1415 u8 rts_retry_limit; /*byte 50 */
1416 u8 data_retry_limit; /*byte 51 */
1417 union {
1418 __le16 pm_frame_timeout;
1419 __le16 attempt_duration;
1420 } timeout;
1421
1422 /*
1423 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1424 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1425 */
1426 __le16 driver_txop;
1427
1428 /*
1429 * MAC header goes here, followed by 2 bytes padding if MAC header
1430 * length is 26 or 30 bytes, followed by payload data
1431 */
1432 u8 payload[0];
1433 struct ieee80211_hdr hdr[0];
1434} __packed;
1435
1436/*
1437 * REPLY_TX = 0x1c (response)
1438 */
1439struct iwl3945_tx_resp {
1440 u8 failure_rts;
1441 u8 failure_frame;
1442 u8 bt_kill_count;
1443 u8 rate;
1444 __le32 wireless_media_time;
1445 __le32 status; /* TX status */
1446} __packed;
1447
1448
1449/*
1450 * 4965 uCode updates these Tx attempt count values in host DRAM.
1451 * Used for managing Tx retries when expecting block-acks.
1452 * Driver should set these fields to 0.
1453 */
1454struct iwl_dram_scratch {
1455 u8 try_cnt; /* Tx attempts */
1456 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1457 __le16 reserved;
1458} __packed;
1459
1460struct iwl_tx_cmd {
1461 /*
1462 * MPDU byte count:
1463 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1464 * + 8 byte IV for CCM or TKIP (not used for WEP)
1465 * + Data payload
1466 * + 8-byte MIC (not used for CCM/WEP)
1467 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1468 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1469 * Range: 14-2342 bytes.
1470 */
1471 __le16 len;
1472
1473 /*
1474 * MPDU or MSDU byte count for next frame.
1475 * Used for fragmentation and bursting, but not 11n aggregation.
1476 * Same as "len", but for next frame. Set to 0 if not applicable.
1477 */
1478 __le16 next_frame_len;
1479
1480 __le32 tx_flags; /* TX_CMD_FLG_* */
1481
1482 /* uCode may modify this field of the Tx command (in host DRAM!).
1483 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
1484 struct iwl_dram_scratch scratch;
1485
1486 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
1487 __le32 rate_n_flags; /* RATE_MCS_* */
1488
1489 /* Index of destination station in uCode's station table */
1490 u8 sta_id;
1491
1492 /* Type of security encryption: CCM or TKIP */
1493 u8 sec_ctl; /* TX_CMD_SEC_* */
1494
1495 /*
1496 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
1497 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
1498 * data frames, this field may be used to selectively reduce initial
1499 * rate (via non-0 value) for special frames (e.g. management), while
1500 * still supporting rate scaling for all frames.
1501 */
1502 u8 initial_rate_index;
1503 u8 reserved;
1504 u8 key[16];
1505 __le16 next_frame_flags;
1506 __le16 reserved2;
1507 union {
1508 __le32 life_time;
1509 __le32 attempt;
1510 } stop_time;
1511
1512 /* Host DRAM physical address pointer to "scratch" in this command.
1513 * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
1514 __le32 dram_lsb_ptr;
1515 u8 dram_msb_ptr;
1516
1517 u8 rts_retry_limit; /*byte 50 */
1518 u8 data_retry_limit; /*byte 51 */
1519 u8 tid_tspec;
1520 union {
1521 __le16 pm_frame_timeout;
1522 __le16 attempt_duration;
1523 } timeout;
1524
1525 /*
1526 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1527 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1528 */
1529 __le16 driver_txop;
1530
1531 /*
1532 * MAC header goes here, followed by 2 bytes padding if MAC header
1533 * length is 26 or 30 bytes, followed by payload data
1534 */
1535 u8 payload[0];
1536 struct ieee80211_hdr hdr[0];
1537} __packed;
1538
1539/* TX command response is sent after *3945* transmission attempts.
1540 *
1541 * NOTES:
1542 *
1543 * TX_STATUS_FAIL_NEXT_FRAG
1544 *
1545 * If the fragment flag in the MAC header for the frame being transmitted
1546 * is set and there is insufficient time to transmit the next frame, the
1547 * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
1548 *
1549 * TX_STATUS_FIFO_UNDERRUN
1550 *
1551 * Indicates the host did not provide bytes to the FIFO fast enough while
1552 * a TX was in progress.
1553 *
1554 * TX_STATUS_FAIL_MGMNT_ABORT
1555 *
1556 * This status is only possible if the ABORT ON MGMT RX parameter was
1557 * set to true with the TX command.
1558 *
1559 * If the MSB of the status parameter is set then an abort sequence is
1560 * required. This sequence consists of the host activating the TX Abort
1561 * control line, and then waiting for the TX Abort command response. This
1562 * indicates that the device is no longer in a transmit state, and that the
1563 * command FIFO has been cleared. The host must then deactivate the TX Abort
1564 * control line. Receiving is still allowed in this case.
1565 */
1566enum {
1567 TX_3945_STATUS_SUCCESS = 0x01,
1568 TX_3945_STATUS_DIRECT_DONE = 0x02,
1569 TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
1570 TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
1571 TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1572 TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
1573 TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
1574 TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1575 TX_3945_STATUS_FAIL_DEST_PS = 0x88,
1576 TX_3945_STATUS_FAIL_ABORTED = 0x89,
1577 TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
1578 TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
1579 TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1580 TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
1581 TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
1582 TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1583 TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
1584 TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1585};
1586
1587/*
1588 * TX command response is sent after *4965* transmission attempts.
1589 *
1590 * Both postpone and abort status are expected behavior from uCode. There is
1591 * no special operation required from the driver, except for RFKILL_FLUSH,
1592 * which requires a Tx flush host command to flush all the Tx frames in queues.
1593 */
1594enum {
1595 TX_STATUS_SUCCESS = 0x01,
1596 TX_STATUS_DIRECT_DONE = 0x02,
1597 /* postpone TX */
1598 TX_STATUS_POSTPONE_DELAY = 0x40,
1599 TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
1600 TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
1601 TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
1602 /* abort TX */
1603 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
1604 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
1605 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
1606 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1607 TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
1608 TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
1609 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1610 TX_STATUS_FAIL_DEST_PS = 0x88,
1611 TX_STATUS_FAIL_HOST_ABORTED = 0x89,
1612 TX_STATUS_FAIL_BT_RETRY = 0x8a,
1613 TX_STATUS_FAIL_STA_INVALID = 0x8b,
1614 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1615 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1616 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1617 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1618 TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
1619 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1620};
1621
1622#define TX_PACKET_MODE_REGULAR 0x0000
1623#define TX_PACKET_MODE_BURST_SEQ 0x0100
1624#define TX_PACKET_MODE_BURST_FIRST 0x0200
1625
1626enum {
1627 TX_POWER_PA_NOT_ACTIVE = 0x0,
1628};
1629
1630enum {
1631 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
1632 TX_STATUS_DELAY_MSK = 0x00000040,
1633 TX_STATUS_ABORT_MSK = 0x00000080,
1634 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
1635 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
1636 TX_RESERVED = 0x00780000, /* bits 19:22 */
1637 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
1638 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1639};
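/*
 * Illustrative sketch only: pulling a Tx response status word apart with
 * the masks above. The low byte is the TX_STATUS_* / TX_3945_STATUS_* code,
 * and the top bit flags that an abort sequence is required.
 */
static inline u32
iwl_legacy_sketch_tx_status_code(__le32 status)
{
	return le32_to_cpu(status) & TX_STATUS_MSK;
}

static inline bool
iwl_legacy_sketch_tx_abort_required(__le32 status)
{
	return !!(le32_to_cpu(status) & TX_ABORT_REQUIRED_MSK);
}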
1640
1641/* *******************************
1642 * TX aggregation status
1643 ******************************* */
1644
1645enum {
1646 AGG_TX_STATE_TRANSMITTED = 0x00,
1647 AGG_TX_STATE_UNDERRUN_MSK = 0x01,
1648 AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
1649 AGG_TX_STATE_ABORT_MSK = 0x08,
1650 AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
1651 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
1652 AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
1653 AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
1654 AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
1655 AGG_TX_STATE_DUMP_TX_MSK = 0x200,
1656 AGG_TX_STATE_DELAY_TX_MSK = 0x400
1657};
1658
1659#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
1660#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
1661
1662#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
1663 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)
1664
1665/* # tx attempts for first frame in aggregation */
1666#define AGG_TX_STATE_TRY_CNT_POS 12
1667#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
1668
1669/* Command ID and sequence number of Tx command for this frame */
1670#define AGG_TX_STATE_SEQ_NUM_POS 16
1671#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
1672
1673/*
1674 * REPLY_TX = 0x1c (response)
1675 *
1676 * This response may be in one of two slightly different formats, indicated
1677 * by the frame_count field:
1678 *
1679 * 1) No aggregation (frame_count == 1). This reports Tx results for
1680 * a single frame. Multiple attempts, at various bit rates, may have
1681 * been made for this frame.
1682 *
1683 * 2) Aggregation (frame_count > 1). This reports Tx results for
1684 * 2 or more frames that used block-acknowledge. All frames were
1685 * transmitted at same rate. Rate scaling may have been used if first
1686 * frame in this new agg block failed in previous agg block(s).
1687 *
1688 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
1689 * block-ack has not been received by the time the 4965 device records
1690 * this status.
1691 * This status relates to reasons the tx might have been blocked or aborted
1692 * within the sending station (this 4965 device), rather than whether it was
1693 * received successfully by the destination station.
1694 */
1695struct agg_tx_status {
1696 __le16 status;
1697 __le16 sequence;
1698} __packed;
1699
1700struct iwl4965_tx_resp {
1701 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1702 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1703 u8 failure_rts; /* # failures due to unsuccessful RTS */
1704 u8 failure_frame; /* # failures due to no ACK (unused for agg) */
1705
1706 /* For non-agg: Rate at which frame was successful.
1707 * For agg: Rate at which all frames were transmitted. */
1708 __le32 rate_n_flags; /* RATE_MCS_* */
1709
1710 /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
1711 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1712 __le16 wireless_media_time; /* uSecs */
1713
1714 __le16 reserved;
1715 __le32 pa_power1; /* RF power amplifier measurement (not used) */
1716 __le32 pa_power2;
1717
1718 /*
1719 * For non-agg: frame status TX_STATUS_*
1720 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
1721 * fields follow this one, up to frame_count.
1722 * Bit fields:
1723 * 11- 0: AGG_TX_STATE_* status code
1724 * 15-12: Retry count for 1st frame in aggregation (retries
1725 * occur if tx failed for this frame when it was a
1726 * member of a previous aggregation block). If rate
1727 * scaling is used, retry count indicates the rate
1728 * table entry used for all frames in the new agg.
1729 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1730 */
1731 union {
1732 __le32 status;
1733 struct agg_tx_status agg_status[0]; /* for each agg frame */
1734 } u;
1735} __packed;
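/*
 * Illustrative sketch only: walking the per-frame aggregation status
 * entries of an aggregated (frame_count > 1) Tx response, using the
 * AGG_TX_* bit fields described above. The helper name is a placeholder.
 */
static inline int
iwl4965_sketch_count_agg_tx_done(struct iwl4965_tx_resp *tx_resp)
{
	int i, done = 0;

	if (tx_resp->frame_count <= 1)
		return -1;			/* non-aggregated response */

	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 st = le16_to_cpu(tx_resp->u.agg_status[i].status);

		if ((st & AGG_TX_STATUS_MSK) == AGG_TX_STATE_TRANSMITTED)
			done++;
	}
	return done;
}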
1736
1737/*
1738 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1739 *
1740 * Reports Block-Acknowledge from recipient station
1741 */
1742struct iwl_compressed_ba_resp {
1743 __le32 sta_addr_lo32;
1744 __le16 sta_addr_hi16;
1745 __le16 reserved;
1746
1747 /* Index of recipient (BA-sending) station in uCode's station table */
1748 u8 sta_id;
1749 u8 tid;
1750 __le16 seq_ctl;
1751 __le64 bitmap;
1752 __le16 scd_flow;
1753 __le16 scd_ssn;
1754} __packed;
1755
1756/*
1757 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
1758 *
1759 * See details under "TXPOWER" in iwl-4965-hw.h.
1760 */
1761
1762struct iwl3945_txpowertable_cmd {
1763 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1764 u8 reserved;
1765 __le16 channel;
1766 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
1767} __packed;
1768
1769struct iwl4965_txpowertable_cmd {
1770 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1771 u8 reserved;
1772 __le16 channel;
1773 struct iwl4965_tx_power_db tx_power;
1774} __packed;
1775
1776
1777/**
1778 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
1779 *
1780 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
1781 *
1782 * NOTE: The table of rates passed to the uCode via the
1783 * RATE_SCALE command sets up the corresponding order of
1784 * rates used for all related commands, including rate
1785 * masks, etc.
1786 *
1787 * For example, if you set 9MB (PLCP 0x0f) as the first
1788 * rate in the rate table, the bit mask for that rate
1789 * when passed through ofdm_basic_rates on the REPLY_RXON
1790 * command would be bit 0 (1 << 0)
1791 */
1792struct iwl3945_rate_scaling_info {
1793 __le16 rate_n_flags;
1794 u8 try_cnt;
1795 u8 next_rate_index;
1796} __packed;
1797
1798struct iwl3945_rate_scaling_cmd {
1799 u8 table_id;
1800 u8 reserved[3];
1801 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
1802} __packed;
1803
1804
1805/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
1806#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1807
1808/* # of EDCA prioritized tx fifos */
1809#define LINK_QUAL_AC_NUM AC_NUM
1810
1811/* # entries in rate scale table to support Tx retries */
1812#define LINK_QUAL_MAX_RETRY_NUM 16
1813
1814/* Tx antenna selection values */
1815#define LINK_QUAL_ANT_A_MSK (1 << 0)
1816#define LINK_QUAL_ANT_B_MSK (1 << 1)
1817#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1818
1819
1820/**
1821 * struct iwl_link_qual_general_params
1822 *
1823 * Used in REPLY_TX_LINK_QUALITY_CMD
1824 */
1825struct iwl_link_qual_general_params {
1826 u8 flags;
1827
1828 /* No entries at or above this (driver chosen) index contain MIMO */
1829 u8 mimo_delimiter;
1830
1831 /* Best single antenna to use for single stream (legacy, SISO). */
1832 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
1833
1834 /* Best antennas to use for MIMO (unused for 4965, assumes both). */
1835 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
1836
1837 /*
1838 * If driver needs to use different initial rates for different
1839 * EDCA QOS access categories (as implemented by tx fifos 0-3),
1840 * this table will set that up, by indicating the indexes in the
1841 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
1842 * Otherwise, driver should set all entries to 0.
1843 *
1844 * Entry usage:
1845 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
1846 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
1847 */
1848 u8 start_rate_index[LINK_QUAL_AC_NUM];
1849} __packed;
1850
1851#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
1852#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
1853#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
1854
1855#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
1856#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
1857#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
1858
1859#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31)
1860#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
1861#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
1862
1863/**
1864 * struct iwl_link_qual_agg_params
1865 *
1866 * Used in REPLY_TX_LINK_QUALITY_CMD
1867 */
1868struct iwl_link_qual_agg_params {
1869
1870 /*
1871 * Maximum number of uSec in aggregation.
1872 * Default is 4000 (4 milliseconds) if not configured in .cfg.
1873 */
1874 __le16 agg_time_limit;
1875
1876 /*
1877 * Number of Tx retries allowed for a frame, before that frame will
1878 * no longer be considered for the start of an aggregation sequence
1879 * (scheduler will then try to tx it as single frame).
1880 * Driver should set this to 3.
1881 */
1882 u8 agg_dis_start_th;
1883
1884 /*
1885 * Maximum number of frames in aggregation.
1886 * 0 = no limit (default). 1 = no aggregation.
1887 * Other values = max # frames in aggregation.
1888 */
1889 u8 agg_frame_cnt_limit;
1890
1891 __le32 reserved;
1892} __packed;
1893
1894/*
1895 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1896 *
1897 * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
1898 *
1899 * Each station in the 4965 device's internal station table has its own table
1900 * of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for
1901 * retrying Tx when an ACK is not received. This command replaces the
1902 * entire table for one station.
1904 *
1905 * NOTE: Station must already be in 4965 device's station table.
1906 * Use REPLY_ADD_STA.
1907 *
1908 * The rate scaling procedures described below work well. Of course, other
1909 * procedures are possible, and may work better for particular environments.
1910 *
1911 *
1912 * FILLING THE RATE TABLE
1913 *
1914 * Given a particular initial rate and mode, as determined by the rate
1915 * scaling algorithm described below, the Linux driver uses the following
1916 * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
1917 * Link Quality command:
1918 *
1919 *
1920 * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
1921 * a) Use this same initial rate for first 3 entries.
1922 * b) Find next lower available rate using same mode (SISO or MIMO),
1923 * use for next 3 entries. If no lower rate available, switch to
1924 * legacy mode (no HT40 channel, no MIMO, no short guard interval).
1925 * c) If using MIMO, set command's mimo_delimiter to number of entries
1926 * using MIMO (3 or 6).
1927 * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
1928 * no MIMO, no short guard interval), at the next lower bit rate
1929 * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
1930 * legacy procedure for remaining table entries.
1931 *
1932 * 2) If using legacy initial rate:
1933 * a) Use the initial rate for only one entry.
1934 * b) For each following entry, reduce the rate to next lower available
1935 * rate, until reaching the lowest available rate.
1936 * c) When reducing rate, also switch antenna selection.
1937 * d) Once lowest available rate is reached, repeat this rate until
1938 * rate table is filled (16 entries), switching antenna each entry.
1939 *
1940 *
1941 * ACCUMULATING HISTORY
1942 *
1943 * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
1944 * uses two sets of frame Tx success history: One for the current/active
1945 * modulation mode, and one for a speculative/search mode that is being
1946 * attempted. If the speculative mode turns out to be more effective (i.e.
1947 * actual transfer rate is better), then the driver continues to use the
1948 * speculative mode as the new current active mode.
1949 *
1950 * Each history set contains, separately for each possible rate, data for a
1951 * sliding window of the 62 most recent tx attempts at that rate. The data
1952 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1953 * and attempted frames, from which the driver can additionally calculate a
1954 * success ratio (success / attempted) and number of failures
1955 * (attempted - success), and control the size of the window (attempted).
1956 * The driver uses the bit map to remove successes from the success sum, as
1957 * the oldest tx attempts fall out of the window.
1958 *
1959 * When the 4965 device makes multiple tx attempts for a given frame, each
1960 * attempt might be at a different rate, and have different modulation
1961 * characteristics (e.g. antenna, fat channel, short guard interval), as set
1962 * up in the rate scaling table in the Link Quality command. The driver must
1963 * determine which rate table entry was used for each tx attempt, to determine
1964 * which rate-specific history to update, and record only those attempts that
1965 * match the modulation characteristics of the history set.
1966 *
1967 * When using block-ack (aggregation), all frames are transmitted at the same
1968 * rate, since there is no per-attempt acknowledgment from the destination
1969 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
1970 * rate_n_flags field. After receiving a block-ack, the driver can update
1971 * history for the entire block all at once.
1972 *
1973 *
1974 * FINDING BEST STARTING RATE:
1975 *
1976 * When working with a selected initial modulation mode (see below), the
1977 * driver attempts to find a best initial rate. The initial rate is the
1978 * first entry in the Link Quality command's rate table.
1979 *
1980 * 1) Calculate actual throughput (success ratio * expected throughput, see
1981 * table below) for current initial rate. Do this only if enough frames
1982 * have been attempted to make the value meaningful: at least 6 failed
1983 * tx attempts, or at least 8 successes. If not enough, don't try rate
1984 * scaling yet.
1985 *
1986 * 2) Find available rates adjacent to current initial rate. Available means:
1987 * a) supported by hardware &&
1988 * b) supported by association &&
1989 * c) within any constraints selected by user
1990 *
1991 * 3) Gather measured throughputs for adjacent rates. These might not have
1992 * enough history to calculate a throughput. That's okay, we might try
1993 * using one of them anyway!
1994 *
1995 * 4) Try decreasing rate if, for current rate:
1996 * a) success ratio is < 15% ||
1997 * b) lower adjacent rate has better measured throughput ||
1998 * c) higher adjacent rate has worse throughput, and lower is unmeasured
1999 *
2000 * As a sanity check, if decrease was determined above, leave rate
2001 * unchanged if:
2002 * a) lower rate unavailable
2003 * b) success ratio at current rate > 85% (very good)
2004 * c) current measured throughput is better than expected throughput
2005 * of lower rate (under perfect 100% tx conditions, see table below)
2006 *
2007 * 5) Try increasing rate if, for current rate:
2008 * a) success ratio is < 15% ||
2009 * b) both adjacent rates' throughputs are unmeasured (try it!) ||
2010 * c) higher adjacent rate has better measured throughput ||
2011 * d) lower adjacent rate has worse throughput, and higher is unmeasured
2012 *
2013 * As a sanity check, if increase was determined above, leave rate
2014 * unchanged if:
2015 * a) success ratio at current rate < 70%. This is not particularly
2016 * good performance; higher rate is sure to have poorer success.
2017 *
2018 * 6) Re-evaluate the rate after each tx frame. If working with block-
2019 * acknowledge, history and statistics may be calculated for the entire
2020 * block (including prior history that fits within the history windows),
2021 * before re-evaluation.
2022 *
2023 * FINDING BEST STARTING MODULATION MODE:
2024 *
2025 * After working with a modulation mode for a "while" (and doing rate scaling),
2026 * the driver searches for a new initial mode in an attempt to improve
2027 * throughput. The "while" is measured by numbers of attempted frames:
2028 *
2029 * For legacy mode, search for new mode after:
2030 * 480 successful frames, or 160 failed frames
2031 * For high-throughput modes (SISO or MIMO), search for new mode after:
2032 * 4500 successful frames, or 400 failed frames
2033 *
2034 * Mode switch possibilities are (3 for each mode):
2035 *
2036 * For legacy:
2037 * Change antenna, try SISO (if HT association), try MIMO (if HT association)
2038 * For SISO:
2039 * Change antenna, try MIMO, try shortened guard interval (SGI)
2040 * For MIMO:
2041 * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
2042 *
2043 * When trying a new mode, use the same bit rate as the old/current mode when
2044 * trying antenna switches and shortened guard interval. When switching to
2045 * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
2046 * for which the expected throughput (under perfect conditions) is about the
2047 * same or slightly better than the actual measured throughput delivered by
2048 * the old/current mode.
2049 *
2050 * Actual throughput can be estimated by multiplying the expected throughput
2051 * by the success ratio (successful / attempted tx frames). Frame size is
2052 * not considered in this calculation; it assumes that frame size will average
2053 * out to be fairly consistent over several samples. The following are
2054 * metric values for expected throughput assuming 100% success ratio.
2055 * Only G band has support for CCK rates:
2056 *
2057 * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
2058 *
2059 * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
2060 * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
2061 * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
2062 * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
2063 * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
2064 * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
2065 * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
2066 * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
2067 * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
2068 * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
2069 *
2070 * After the new mode has been tried for a short while (minimum of 6 failed
2071 * frames or 8 successful frames), compare success ratio and actual throughput
2072 * estimate of the new mode with the old. If either is better with the new
2073 * mode, continue to use the new mode.
2074 *
2075 * Continue comparing modes until all 3 possibilities have been tried.
2076 * If moving from legacy to HT, try all 3 possibilities from the new HT
2077 * mode. After trying all 3, a best mode is found. Continue to use this mode
2078 * for the longer "while" described above (e.g. 480 successful frames for
2079 * legacy), and then repeat the search process.
2080 *
2081 */
2082struct iwl_link_quality_cmd {
2083
2084 /* Index of destination/recipient station in uCode's station table */
2085 u8 sta_id;
2086 u8 reserved1;
2087 __le16 control; /* not used */
2088 struct iwl_link_qual_general_params general_params;
2089 struct iwl_link_qual_agg_params agg_params;
2090
2091 /*
2092 * Rate info; when using rate-scaling, Tx command's initial_rate_index
2093 * specifies 1st Tx rate attempted, via index into this table.
2094 * The 4965 device works its way through the table when retrying Tx.
2095 */
2096 struct {
2097 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
2098 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
2099 __le32 reserved2;
2100} __packed;
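/*
 * Illustrative sketch only: filling rs_table for a legacy initial rate,
 * following step 2 of "FILLING THE RATE TABLE" above. The descending
 * rate list is assumed to be supplied by the caller (hypothetical), and
 * the per-entry antenna toggling described above is omitted for brevity.
 */
static inline void
iwl_legacy_sketch_fill_lq_legacy(struct iwl_link_quality_cmd *lq,
				 const __le32 *rates_desc, int num_rates)
{
	int i, r = 0;

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		lq->rs_table[i].rate_n_flags = rates_desc[r];
		if (r < num_rates - 1)
			r++;	/* step down, then repeat the lowest rate */
	}
	lq->general_params.mimo_delimiter = 0;	/* no MIMO entries */
}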
2101
2102/*
2103 * BT configuration enable flags:
2104 * bit 0 - 1: BT channel announcement enabled
2105 * 0: disable
2106 * bit 1 - 1: priority of BT device enabled
2107 * 0: disable
2108 */
2109#define BT_COEX_DISABLE (0x0)
2110#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
2111#define BT_ENABLE_PRIORITY BIT(1)
2112
2113#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
2114
2115#define BT_LEAD_TIME_DEF (0x1E)
2116
2117#define BT_MAX_KILL_DEF (0x5)
2118
2119/*
2120 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
2121 *
2122 * 3945 and 4965 devices support hardware handshake with Bluetooth device on
2123 * same platform. Bluetooth device alerts wireless device when it will Tx;
2124 * wireless device can delay or kill its own Tx to accommodate.
2125 */
2126struct iwl_bt_cmd {
2127 u8 flags;
2128 u8 lead_time;
2129 u8 max_kill;
2130 u8 reserved;
2131 __le32 kill_ack_mask;
2132 __le32 kill_cts_mask;
2133} __packed;
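/*
 * Illustrative sketch only: default REPLY_BT_CONFIG values using the
 * definitions above, with coexistence enabled and no kill masks.
 */
static inline void
iwl_legacy_sketch_bt_defaults(struct iwl_bt_cmd *bt)
{
	bt->flags = BT_COEX_ENABLE;
	bt->lead_time = BT_LEAD_TIME_DEF;
	bt->max_kill = BT_MAX_KILL_DEF;
	bt->kill_ack_mask = 0;
	bt->kill_cts_mask = 0;
}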
2134
2135
2136/******************************************************************************
2137 * (6)
2138 * Spectrum Management (802.11h) Commands, Responses, Notifications:
2139 *
2140 *****************************************************************************/
2141
2142/*
2143 * Spectrum Management
2144 */
2145#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
2146 RXON_FILTER_CTL2HOST_MSK | \
2147 RXON_FILTER_ACCEPT_GRP_MSK | \
2148 RXON_FILTER_DIS_DECRYPT_MSK | \
2149 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
2150 RXON_FILTER_ASSOC_MSK | \
2151 RXON_FILTER_BCON_AWARE_MSK)
2152
2153struct iwl_measure_channel {
2154 __le32 duration; /* measurement duration in extended beacon
2155 * format */
2156 u8 channel; /* channel to measure */
2157 u8 type; /* see enum iwl_measure_type */
2158 __le16 reserved;
2159} __packed;
2160
2161/*
2162 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
2163 */
2164struct iwl_spectrum_cmd {
2165 __le16 len; /* number of bytes starting from token */
2166 u8 token; /* token id */
2167 u8 id; /* measurement id -- 0 or 1 */
2168 u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */
2169 u8 periodic; /* 1 = periodic */
2170 __le16 path_loss_timeout;
2171 __le32 start_time; /* start time in extended beacon format */
2172 __le32 reserved2;
2173 __le32 flags; /* rxon flags */
2174 __le32 filter_flags; /* rxon filter flags */
2175 __le16 channel_count; /* minimum 1, maximum 10 */
2176 __le16 reserved3;
2177 struct iwl_measure_channel channels[10];
2178} __packed;
2179
2180/*
2181 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
2182 */
2183struct iwl_spectrum_resp {
2184 u8 token;
2185 u8 id; /* id of the prior command replaced, or 0xff */
2186 __le16 status; /* 0 - command will be handled
2187 * 1 - cannot handle (conflicts with another
2188 * measurement) */
2189} __packed;
2190
2191enum iwl_measurement_state {
2192 IWL_MEASUREMENT_START = 0,
2193 IWL_MEASUREMENT_STOP = 1,
2194};
2195
2196enum iwl_measurement_status {
2197 IWL_MEASUREMENT_OK = 0,
2198 IWL_MEASUREMENT_CONCURRENT = 1,
2199 IWL_MEASUREMENT_CSA_CONFLICT = 2,
2200 IWL_MEASUREMENT_TGH_CONFLICT = 3,
2201 /* 4-5 reserved */
2202 IWL_MEASUREMENT_STOPPED = 6,
2203 IWL_MEASUREMENT_TIMEOUT = 7,
2204 IWL_MEASUREMENT_PERIODIC_FAILED = 8,
2205};
2206
2207#define NUM_ELEMENTS_IN_HISTOGRAM 8
2208
2209struct iwl_measurement_histogram {
2210 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2211 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2212} __packed;
2213
2214/* clear channel availability counters */
2215struct iwl_measurement_cca_counters {
2216 __le32 ofdm;
2217 __le32 cck;
2218} __packed;
2219
2220enum iwl_measure_type {
2221 IWL_MEASURE_BASIC = (1 << 0),
2222 IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
2223 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
2224 IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
2225 IWL_MEASURE_FRAME = (1 << 4),
2226 /* bits 5:6 are reserved */
2227 IWL_MEASURE_IDLE = (1 << 7),
2228};
2229
2230/*
2231 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
2232 */
2233struct iwl_spectrum_notification {
2234 u8 id; /* measurement id -- 0 or 1 */
2235 u8 token;
2236 u8 channel_index; /* index in measurement channel list */
2237 u8 state; /* 0 - start, 1 - stop */
2238 __le32 start_time; /* lower 32-bits of TSF */
2239 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
2240 u8 channel;
2241 u8 type; /* see enum iwl_measurement_type */
2242 u8 reserved1;
2243 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
2244 * valid if applicable for the measurement type requested. */
2245 __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */
2246 __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */
2247 __le32 cca_time; /* channel load time in usecs */
2248 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
2249 * unidentified */
2250 u8 reserved2[3];
2251 struct iwl_measurement_histogram histogram;
2252 __le32 stop_time; /* lower 32-bits of TSF */
2253 __le32 status; /* see iwl_measurement_status */
2254} __packed;
2255
2256/******************************************************************************
2257 * (7)
2258 * Power Management Commands, Responses, Notifications:
2259 *
2260 *****************************************************************************/
2261
2262/**
2263 * struct iwl_powertable_cmd - Power Table Command
2264 * @flags: See below:
2265 *
2266 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
2267 *
2268 * PM allow:
2269 * bit 0 - '0' Driver does not allow power management
2270 * '1' Driver allows PM (use rest of parameters)
2271 *
2272 * uCode sends sleep notifications:
2273 * bit 1 - '0' Don't send sleep notification
2274 * '1' Send sleep notification (SEND_PM_NOTIFICATION)
2275 *
2276 * Sleep over DTIM
2277 * bit 2 - '0' PM has to wake up every DTIM
2278 * '1' PM may sleep over DTIM until the listen interval.
2279 *
2280 * PCI power managed
2281 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2282 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2283 *
2284 * Fast PD
2285 * bit 4 - '1' Put radio to sleep when receiving frame for others
2286 *
2287 * Force sleep Modes
2288 * bit 31/30- '00' use both mac/xtal sleeps
2289 * '01' force Mac sleep
2290 * '10' force xtal sleep
2291 * '11' Illegal set
2292 *
2293 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
2294 * uCode assumes sleep over DTIM is allowed and we don't need to wake up
2295 * for every DTIM.
2296 */
2297#define IWL_POWER_VEC_SIZE 5
2298
2299#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2300#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
2301#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
2302#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
2303#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2304#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
2305#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
2306#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
2307#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
2308
2309struct iwl3945_powertable_cmd {
2310 __le16 flags;
2311 u8 reserved[2];
2312 __le32 rx_data_timeout;
2313 __le32 tx_data_timeout;
2314 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2315} __packed;
2316
2317struct iwl_powertable_cmd {
2318 __le16 flags;
2319 u8 keep_alive_seconds; /* 3945 reserved */
2320 u8 debug_flags; /* 3945 reserved */
2321 __le32 rx_data_timeout;
2322 __le32 tx_data_timeout;
2323 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2324 __le32 keep_alive_beacons;
2325} __packed;
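/*
 * Illustrative sketch only: enabling power management via the flag bits
 * above. The rx/tx timeouts and sleep intervals are device-tuning values;
 * they are left at zero here just to keep the sketch self-contained.
 */
static inline void
iwl_legacy_sketch_power_cmd(struct iwl_powertable_cmd *cmd,
			    bool sleep_over_dtim)
{
	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	if (sleep_over_dtim)
		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
	cmd->rx_data_timeout = 0;
	cmd->tx_data_timeout = 0;
}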
2326
2327/*
2328 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
2329 * all devices identical.
2330 */
2331struct iwl_sleep_notification {
2332 u8 pm_sleep_mode;
2333 u8 pm_wakeup_src;
2334 __le16 reserved;
2335 __le32 sleep_time;
2336 __le32 tsf_low;
2337 __le32 bcon_timer;
2338} __packed;
2339
2340/* Sleep states. all devices identical. */
2341enum {
2342 IWL_PM_NO_SLEEP = 0,
2343 IWL_PM_SLP_MAC = 1,
2344 IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
2345 IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
2346 IWL_PM_SLP_PHY = 4,
2347 IWL_PM_SLP_REPENT = 5,
2348 IWL_PM_WAKEUP_BY_TIMER = 6,
2349 IWL_PM_WAKEUP_BY_DRIVER = 7,
2350 IWL_PM_WAKEUP_BY_RFKILL = 8,
2351 /* 3 reserved */
2352 IWL_PM_NUM_OF_MODES = 12,
2353};
2354
2355/*
2356 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
2357 */
2358struct iwl_card_state_notif {
2359 __le32 flags;
2360} __packed;
2361
2362#define HW_CARD_DISABLED 0x01
2363#define SW_CARD_DISABLED 0x02
2364#define CT_CARD_DISABLED 0x04
2365#define RXON_CARD_DISABLED 0x10
2366
2367struct iwl_ct_kill_config {
2368 __le32 reserved;
2369 __le32 critical_temperature_M;
2370 __le32 critical_temperature_R;
2371} __packed;
2372
2373/******************************************************************************
2374 * (8)
2375 * Scan Commands, Responses, Notifications:
2376 *
2377 *****************************************************************************/
2378
2379#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
2380#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2381
2382/**
2383 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
2384 *
2385 * One for each channel in the scan list.
2386 * Each channel can independently select:
2387 * 1) SSID for directed active scans
2388 * 2) Txpower setting (for rate specified within Tx command)
2389 * 3) How long to stay on-channel (behavior may be modified by quiet_time,
2390 * quiet_plcp_th, good_CRC_th)
2391 *
2392 * To avoid uCode errors, make sure the following are true (see comments
2393 * under struct iwl_scan_cmd about max_out_time and quiet_time):
2394 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2395 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2396 * 2) quiet_time <= active_dwell
2397 * 3) If restricting off-channel time (i.e. max_out_time !=0):
2398 * passive_dwell < max_out_time
2399 * active_dwell < max_out_time
2400 */
2401struct iwl3945_scan_channel {
2402 /*
2403 * type is defined as:
2404 * 0:0 1 = active, 0 = passive
2405 * 1:4 SSID direct bit map; if a bit is set, then corresponding
2406 * SSID IE is transmitted in probe request.
2407 * 5:7 reserved
2408 */
2409 u8 type;
2410 u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
2411 struct iwl3945_tx_power tpc;
2412 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2413 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2414} __packed;
2415
2416/* set number of direct probes u8 type */
2417#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2418
2419struct iwl_scan_channel {
2420 /*
2421 * type is defined as:
2422 * 0:0 1 = active, 0 = passive
2423 * 1:20 SSID direct bit map; if a bit is set, then corresponding
2424 * SSID IE is transmitted in probe request.
2425 * 21:31 reserved
2426 */
2427 __le32 type;
2428 __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
2429 u8 tx_gain; /* gain for analog radio */
2430 u8 dsp_atten; /* gain for DSP */
2431 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2432 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2433} __packed;
2434
2435/* set number of direct probes __le32 type */
2436#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
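/*
 * Illustrative sketch only: filling one scan-channel entry. The dwell
 * times shown are arbitrary example values; IWL_SCAN_PROBE_MASK(n) sets
 * bits 1..n of "type" to select the first n direct-probe SSID entries
 * (see struct iwl_ssid_ie below).
 */
static inline void
iwl_legacy_sketch_scan_channel(struct iwl_scan_channel *ch, u16 channel,
			       bool active, u8 n_direct_ssids)
{
	ch->channel = cpu_to_le16(channel);
	ch->type = active ? SCAN_CHANNEL_TYPE_ACTIVE :
			    SCAN_CHANNEL_TYPE_PASSIVE;
	if (active && n_direct_ssids)
		ch->type |= IWL_SCAN_PROBE_MASK(n_direct_ssids);
	ch->active_dwell = cpu_to_le16(30);	/* TU */
	ch->passive_dwell = cpu_to_le16(110);	/* TU */
}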
2437
2438/**
2439 * struct iwl_ssid_ie - directed scan network information element
2440 *
2441 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
2442 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
2443 * each channel may select different ssids from among the 20 (4) entries.
2444 * SSID IEs get transmitted in reverse order of entry.
2445 */
2446struct iwl_ssid_ie {
2447 u8 id;
2448 u8 len;
2449 u8 ssid[32];
2450} __packed;
2451
2452#define PROBE_OPTION_MAX_3945 4
2453#define PROBE_OPTION_MAX 20
2454#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2455#define IWL_GOOD_CRC_TH_DISABLED 0
2456#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2457#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2458#define IWL_MAX_SCAN_SIZE 1024
2459#define IWL_MAX_CMD_SIZE 4096
2460
2461/*
2462 * REPLY_SCAN_CMD = 0x80 (command)
2463 *
2464 * The hardware scan command is very powerful; the driver can set it up to
2465 * maintain (relatively) normal network traffic while doing a scan in the
2466 * background. The max_out_time and suspend_time control the ratio of how
2467 * long the device stays on an associated network channel ("service channel")
2468 * vs. how long it's away from the service channel, i.e. tuned to other channels
2469 * for scanning.
2470 *
2471 * max_out_time is the max time off-channel (in usec), and suspend_time
2472 * is how long (in "extended beacon" format) that the scan is "suspended"
2473 * after returning to the service channel. That is, suspend_time is the
2474 * time that we stay on the service channel, doing normal work, between
2475 * scan segments. The driver may set these parameters differently to support
2476 * scanning when associated vs. not associated, and light vs. heavy traffic
2477 * loads when associated.
2478 *
2479 * After receiving this command, the device's scan engine does the following:
2480 *
2481 * 1) Sends SCAN_START notification to driver
2482 * 2) Checks to see if it has time to do scan for one channel
2483 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
2484 * to tell AP that we're going off-channel
2485 * 4) Tunes to first channel in scan list, does active or passive scan
2486 * 5) Sends SCAN_RESULT notification to driver
2487 * 6) Checks to see if it has time to do scan on *next* channel in list
2488 * 7) Repeats 4-6 until it no longer has time to scan the next channel
2489 * before max_out_time expires
2490 * 8) Returns to service channel
2491 * 9) Sends NULL packet with PS=0 to tell AP that we're back
2492 * 10) Stays on service channel until suspend_time expires
2493 * 11) Repeats entire process 2-10 until list is complete
2494 * 12) Sends SCAN_COMPLETE notification
2495 *
2496 * For fast, efficient scans, the scan command also has support for staying on
2497 * a channel for just a short time, if doing active scanning and getting no
2498 * responses to the transmitted probe request. This time is controlled by
2499 * quiet_time, and the number of received packets below which a channel is
2500 * considered "quiet" is controlled by quiet_plcp_threshold.
2501 *
2502 * For active scanning on channels that have regulatory restrictions against
2503 * blindly transmitting, the scan can listen before transmitting, to make sure
2504 * that there is already legitimate activity on the channel. If enough
2505 * packets are cleanly received on the channel (controlled by good_CRC_th,
2506 * typical value 1), the scan engine starts transmitting probe requests.
2507 *
2508 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2509 *
2510 * To avoid uCode errors, see timing restrictions described under
2511 * struct iwl_scan_channel.
2512 */
2513
2514struct iwl3945_scan_cmd {
2515 __le16 len;
2516 u8 reserved0;
2517 u8 channel_count; /* # channels in channel list */
2518 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2519 * (only for active scan) */
2520 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2521 __le16 good_CRC_th; /* passive -> active promotion threshold */
2522 __le16 reserved1;
2523 __le32 max_out_time; /* max usec to be away from associated (service)
2524 * channel */
2525 __le32 suspend_time; /* pause scan this long (in "extended beacon
2526 * format") when returning to service channel:
2527 * 3945; 31:24 # beacons, 19:0 additional usec,
2528 * 4965; 31:22 # beacons, 21:0 additional usec.
2529 */
2530 __le32 flags; /* RXON_FLG_* */
2531 __le32 filter_flags; /* RXON_FILTER_* */
2532
2533 /* For active scans (set to all-0s for passive scans).
2534 * Does not include payload. Must specify Tx rate; no rate scaling. */
2535 struct iwl3945_tx_cmd tx_cmd;
2536
2537 /* For directed active scans (set to all-0s otherwise) */
2538 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
2539
2540 /*
2541 * Probe request frame, followed by channel list.
2542 *
2543 * Size of probe request frame is specified by byte count in tx_cmd.
2544 * Channel list follows immediately after probe request frame.
2545 * Number of channels in list is specified by channel_count.
2546 * Each channel in list is of type:
2547 *
2548 * struct iwl3945_scan_channel channels[0];
2549 *
2550 * NOTE: Only one band of channels can be scanned per pass. You
2551 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2552 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2553 * before requesting another scan.
2554 */
2555 u8 data[0];
2556} __packed;
2557
2558struct iwl_scan_cmd {
2559 __le16 len;
2560 u8 reserved0;
2561 u8 channel_count; /* # channels in channel list */
2562 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2563 * (only for active scan) */
2564 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2565 __le16 good_CRC_th; /* passive -> active promotion threshold */
2566 __le16 rx_chain; /* RXON_RX_CHAIN_* */
2567 __le32 max_out_time; /* max usec to be away from associated (service)
2568 * channel */
2569 __le32 suspend_time; /* pause scan this long (in "extended beacon
2570 * format") when returning to service chnl:
2571 * 3945; 31:24 # beacons, 19:0 additional usec,
2572 * 4965; 31:22 # beacons, 21:0 additional usec.
2573 */
2574 __le32 flags; /* RXON_FLG_* */
2575 __le32 filter_flags; /* RXON_FILTER_* */
2576
2577 /* For active scans (set to all-0s for passive scans).
2578 * Does not include payload. Must specify Tx rate; no rate scaling. */
2579 struct iwl_tx_cmd tx_cmd;
2580
2581 /* For directed active scans (set to all-0s otherwise) */
2582 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
2583
2584 /*
2585 * Probe request frame, followed by channel list.
2586 *
2587 * Size of probe request frame is specified by byte count in tx_cmd.
2588 * Channel list follows immediately after probe request frame.
2589 * Number of channels in list is specified by channel_count.
2590 * Each channel in list is of type:
2591 *
2592 * struct iwl_scan_channel channels[0];
2593 *
2594 * NOTE: Only one band of channels can be scanned per pass. You
2595 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2596 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2597 * before requesting another scan.
2598 */
2599 u8 data[0];
2600} __packed;
2601
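
/*
 * Illustrative sketch (not part of the driver): filling the fixed part of
 * struct iwl_scan_cmd for a 2.4 GHz directed active scan with one SSID.
 * The dwell/timing values are example assumptions only; the real driver
 * derives them in iwl-scan.c from the current association state.  Assumes
 * WLAN_EID_SSID from <linux/ieee80211.h>.
 */
static void example_fill_scan_cmd(struct iwl_scan_cmd *scan,
				  const u8 *ssid, u8 ssid_len)
{
	scan->quiet_time = cpu_to_le16(10);	/* msec dwell on a "quiet" channel */
	scan->quiet_plcp_th = cpu_to_le16(1);	/* < 1 pkt heard => channel is quiet */
	scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
	scan->max_out_time = cpu_to_le32(200 * 1024);	/* 200 TU off-channel */
	scan->suspend_time = cpu_to_le32(100 * 1024);	/* usec part only (bits 21:0) */
	scan->flags = RXON_FLG_BAND_24G_MSK;	/* one band per scan command */

	/* one directed SSID; which entries get probed is selected per channel
	 * via IWL_SCAN_PROBE_MASK() in struct iwl_scan_channel's "type" field */
	scan->direct_scan[0].id = WLAN_EID_SSID;
	scan->direct_scan[0].len = ssid_len;
	memcpy(scan->direct_scan[0].ssid, ssid, ssid_len);

	/* the probe request frame then goes into scan->data[], its size into
	 * the tx_cmd byte count, and the overall command size into scan->len */
}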
2602/* A scan abort is reported by the complete notification with abort status. */
2603#define CAN_ABORT_STATUS cpu_to_le32(0x1)
2604/* complete notification statuses */
2605#define ABORT_STATUS 0x2
2606
2607/*
2608 * REPLY_SCAN_CMD = 0x80 (response)
2609 */
2610struct iwl_scanreq_notification {
2611 __le32 status; /* 1: okay, 2: cannot fulfill request */
2612} __packed;
2613
2614/*
2615 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
2616 */
2617struct iwl_scanstart_notification {
2618 __le32 tsf_low;
2619 __le32 tsf_high;
2620 __le32 beacon_timer;
2621 u8 channel;
2622 u8 band;
2623 u8 reserved[2];
2624 __le32 status;
2625} __packed;
2626
2627#define SCAN_OWNER_STATUS 0x1
2628#define MEASURE_OWNER_STATUS 0x2
2629
2630#define IWL_PROBE_STATUS_OK 0
2631#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
2632/* error statuses combined with TX_FAILED */
2633#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
2634#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
2635
2636#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
2637/*
2638 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
2639 */
2640struct iwl_scanresults_notification {
2641 u8 channel;
2642 u8 band;
2643 u8 probe_status;
2644 u8 num_probe_not_sent; /* not enough time to send */
2645 __le32 tsf_low;
2646 __le32 tsf_high;
2647 __le32 statistics[NUMBER_OF_STATISTICS];
2648} __packed;
2649
2650/*
2651 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
2652 */
2653struct iwl_scancomplete_notification {
2654 u8 scanned_channels;
2655 u8 status;
2656 u8 last_channel;
2657 __le32 tsf_low;
2658 __le32 tsf_high;
2659} __packed;
2660
2661
2662/******************************************************************************
2663 * (9)
2664 * IBSS/AP Commands and Notifications:
2665 *
2666 *****************************************************************************/
2667
2668enum iwl_ibss_manager {
2669 IWL_NOT_IBSS_MANAGER = 0,
2670 IWL_IBSS_MANAGER = 1,
2671};
2672
2673/*
2674 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
2675 */
2676
2677struct iwl3945_beacon_notif {
2678 struct iwl3945_tx_resp beacon_notify_hdr;
2679 __le32 low_tsf;
2680 __le32 high_tsf;
2681 __le32 ibss_mgr_status;
2682} __packed;
2683
2684struct iwl4965_beacon_notif {
2685 struct iwl4965_tx_resp beacon_notify_hdr;
2686 __le32 low_tsf;
2687 __le32 high_tsf;
2688 __le32 ibss_mgr_status;
2689} __packed;
2690
2691/*
2692 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2693 */
2694
2695struct iwl3945_tx_beacon_cmd {
2696 struct iwl3945_tx_cmd tx;
2697 __le16 tim_idx;
2698 u8 tim_size;
2699 u8 reserved1;
2700 struct ieee80211_hdr frame[0]; /* beacon frame */
2701} __packed;
2702
2703struct iwl_tx_beacon_cmd {
2704 struct iwl_tx_cmd tx;
2705 __le16 tim_idx;
2706 u8 tim_size;
2707 u8 reserved1;
2708 struct ieee80211_hdr frame[0]; /* beacon frame */
2709} __packed;
2710
2711/******************************************************************************
2712 * (10)
2713 * Statistics Commands and Notifications:
2714 *
2715 *****************************************************************************/
2716
2717#define IWL_TEMP_CONVERT 260
2718
2719#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
2720#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
2721#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
2722
2723/* Used for passing to driver number of successes and failures per rate */
2724struct rate_histogram {
2725 union {
2726 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2727 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2728 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2729 } success;
2730 union {
2731 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2732 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2733 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2734 } failed;
2735} __packed;
2736
2737/* statistics command response */
2738
2739struct iwl39_statistics_rx_phy {
2740 __le32 ina_cnt;
2741 __le32 fina_cnt;
2742 __le32 plcp_err;
2743 __le32 crc32_err;
2744 __le32 overrun_err;
2745 __le32 early_overrun_err;
2746 __le32 crc32_good;
2747 __le32 false_alarm_cnt;
2748 __le32 fina_sync_err_cnt;
2749 __le32 sfd_timeout;
2750 __le32 fina_timeout;
2751 __le32 unresponded_rts;
2752 __le32 rxe_frame_limit_overrun;
2753 __le32 sent_ack_cnt;
2754 __le32 sent_cts_cnt;
2755} __packed;
2756
2757struct iwl39_statistics_rx_non_phy {
2758 __le32 bogus_cts; /* CTS received when not expecting CTS */
2759 __le32 bogus_ack; /* ACK received when not expecting ACK */
2760 __le32 non_bssid_frames; /* number of frames with BSSID that
2761 * doesn't belong to the STA BSSID */
2762 __le32 filtered_frames; /* count frames that were dumped in the
2763 * filtering process */
2764 __le32 non_channel_beacons; /* beacons with our bss id but not on
2765 * our serving channel */
2766} __packed;
2767
2768struct iwl39_statistics_rx {
2769 struct iwl39_statistics_rx_phy ofdm;
2770 struct iwl39_statistics_rx_phy cck;
2771 struct iwl39_statistics_rx_non_phy general;
2772} __packed;
2773
2774struct iwl39_statistics_tx {
2775 __le32 preamble_cnt;
2776 __le32 rx_detected_cnt;
2777 __le32 bt_prio_defer_cnt;
2778 __le32 bt_prio_kill_cnt;
2779 __le32 few_bytes_cnt;
2780 __le32 cts_timeout;
2781 __le32 ack_timeout;
2782 __le32 expected_ack_cnt;
2783 __le32 actual_ack_cnt;
2784} __packed;
2785
2786struct statistics_dbg {
2787 __le32 burst_check;
2788 __le32 burst_count;
2789 __le32 wait_for_silence_timeout_cnt;
2790 __le32 reserved[3];
2791} __packed;
2792
2793struct iwl39_statistics_div {
2794 __le32 tx_on_a;
2795 __le32 tx_on_b;
2796 __le32 exec_time;
2797 __le32 probe_time;
2798} __packed;
2799
2800struct iwl39_statistics_general {
2801 __le32 temperature;
2802 struct statistics_dbg dbg;
2803 __le32 sleep_time;
2804 __le32 slots_out;
2805 __le32 slots_idle;
2806 __le32 ttl_timestamp;
2807 struct iwl39_statistics_div div;
2808} __packed;
2809
2810struct statistics_rx_phy {
2811 __le32 ina_cnt;
2812 __le32 fina_cnt;
2813 __le32 plcp_err;
2814 __le32 crc32_err;
2815 __le32 overrun_err;
2816 __le32 early_overrun_err;
2817 __le32 crc32_good;
2818 __le32 false_alarm_cnt;
2819 __le32 fina_sync_err_cnt;
2820 __le32 sfd_timeout;
2821 __le32 fina_timeout;
2822 __le32 unresponded_rts;
2823 __le32 rxe_frame_limit_overrun;
2824 __le32 sent_ack_cnt;
2825 __le32 sent_cts_cnt;
2826 __le32 sent_ba_rsp_cnt;
2827 __le32 dsp_self_kill;
2828 __le32 mh_format_err;
2829 __le32 re_acq_main_rssi_sum;
2830 __le32 reserved3;
2831} __packed;
2832
2833struct statistics_rx_ht_phy {
2834 __le32 plcp_err;
2835 __le32 overrun_err;
2836 __le32 early_overrun_err;
2837 __le32 crc32_good;
2838 __le32 crc32_err;
2839 __le32 mh_format_err;
2840 __le32 agg_crc32_good;
2841 __le32 agg_mpdu_cnt;
2842 __le32 agg_cnt;
2843 __le32 unsupport_mcs;
2844} __packed;
2845
2846#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
2847
2848struct statistics_rx_non_phy {
2849 __le32 bogus_cts; /* CTS received when not expecting CTS */
2850 __le32 bogus_ack; /* ACK received when not expecting ACK */
2851 __le32 non_bssid_frames; /* number of frames with BSSID that
2852 * doesn't belong to the STA BSSID */
2853 __le32 filtered_frames; /* count frames that were dumped in the
2854 * filtering process */
2855 __le32 non_channel_beacons; /* beacons with our bss id but not on
2856 * our serving channel */
2857 __le32 channel_beacons; /* beacons with our bss id and in our
2858 * serving channel */
2859 __le32 num_missed_bcon; /* number of missed beacons */
2860 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
2861 * ADC was in saturation */
2862 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
2863 * for INA */
2864 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
2865 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
2866 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
2867 __le32 interference_data_flag; /* flag for interference data
2868 * availability. 1 when data is
2869 * available. */
2870 __le32 channel_load; /* counts RX Enable time in uSec */
2871 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
2872 * and CCK) counter */
2873 __le32 beacon_rssi_a;
2874 __le32 beacon_rssi_b;
2875 __le32 beacon_rssi_c;
2876 __le32 beacon_energy_a;
2877 __le32 beacon_energy_b;
2878 __le32 beacon_energy_c;
2879} __packed;
2880
2881struct statistics_rx {
2882 struct statistics_rx_phy ofdm;
2883 struct statistics_rx_phy cck;
2884 struct statistics_rx_non_phy general;
2885 struct statistics_rx_ht_phy ofdm_ht;
2886} __packed;
2887
2888/**
2889 * struct statistics_tx_power - current tx power
2890 *
2891 * @ant_a: current tx power on chain a in 1/2 dB step
2892 * @ant_b: current tx power on chain b in 1/2 dB step
2893 * @ant_c: current tx power on chain c in 1/2 dB step
2894 */
2895struct statistics_tx_power {
2896 u8 ant_a;
2897 u8 ant_b;
2898 u8 ant_c;
2899 u8 reserved;
2900} __packed;
2901
2902struct statistics_tx_non_phy_agg {
2903 __le32 ba_timeout;
2904 __le32 ba_reschedule_frames;
2905 __le32 scd_query_agg_frame_cnt;
2906 __le32 scd_query_no_agg;
2907 __le32 scd_query_agg;
2908 __le32 scd_query_mismatch;
2909 __le32 frame_not_ready;
2910 __le32 underrun;
2911 __le32 bt_prio_kill;
2912 __le32 rx_ba_rsp_cnt;
2913} __packed;
2914
2915struct statistics_tx {
2916 __le32 preamble_cnt;
2917 __le32 rx_detected_cnt;
2918 __le32 bt_prio_defer_cnt;
2919 __le32 bt_prio_kill_cnt;
2920 __le32 few_bytes_cnt;
2921 __le32 cts_timeout;
2922 __le32 ack_timeout;
2923 __le32 expected_ack_cnt;
2924 __le32 actual_ack_cnt;
2925 __le32 dump_msdu_cnt;
2926 __le32 burst_abort_next_frame_mismatch_cnt;
2927 __le32 burst_abort_missing_next_frame_cnt;
2928 __le32 cts_timeout_collision;
2929 __le32 ack_or_ba_timeout_collision;
2930 struct statistics_tx_non_phy_agg agg;
2931
2932 __le32 reserved1;
2933} __packed;
2934
2935
2936struct statistics_div {
2937 __le32 tx_on_a;
2938 __le32 tx_on_b;
2939 __le32 exec_time;
2940 __le32 probe_time;
2941 __le32 reserved1;
2942 __le32 reserved2;
2943} __packed;
2944
2945struct statistics_general_common {
2946 __le32 temperature; /* radio temperature */
2947 struct statistics_dbg dbg;
2948 __le32 sleep_time;
2949 __le32 slots_out;
2950 __le32 slots_idle;
2951 __le32 ttl_timestamp;
2952 struct statistics_div div;
2953 __le32 rx_enable_counter;
2954 /*
2955 * num_of_sos_states:
2956 * count the number of times we have to re-tune
2957 * in order to get out of bad PHY status
2958 */
2959 __le32 num_of_sos_states;
2960} __packed;
2961
2962struct statistics_general {
2963 struct statistics_general_common common;
2964 __le32 reserved2;
2965 __le32 reserved3;
2966} __packed;
2967
2968#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
2969#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
2970#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
2971
2972/*
2973 * REPLY_STATISTICS_CMD = 0x9c,
2974 * all devices identical.
2975 *
2976 * This command triggers an immediate response containing uCode statistics.
2977 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
2978 *
2979 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2980 * internal copy of the statistics (counters) after issuing the response.
2981 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
2982 *
2983 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2984 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
2985 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
2986 */
2987#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2988#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
2989struct iwl_statistics_cmd {
2990 __le32 configuration_flags; /* IWL_STATS_CONF_* */
2991} __packed;
2992
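/*
 * Illustrative sketch (not part of the driver): requesting an immediate
 * statistics dump while also clearing uCode's internal counters.
 * iwl_legacy_send_cmd_pdu() is the generic host-command helper used
 * elsewhere in this driver; the response arrives in the same format as
 * STATISTICS_NOTIFICATION below.
 */
static int example_request_statistics(struct iwl_priv *priv)
{
	struct iwl_statistics_cmd cmd = {
		.configuration_flags = IWL_STATS_CONF_CLEAR_STATS,
	};

	return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
				       sizeof(cmd), &cmd);
}
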
2993/*
2994 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
2995 *
2996 * By default, uCode issues this notification after receiving a beacon
2997 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2998 * REPLY_STATISTICS_CMD 0x9c, above.
2999 *
3000 * Statistics counters continue to increment beacon after beacon, but are
3001 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
3002 * 0x9c with CLEAR_STATS bit set (see above).
3003 *
3004 * uCode also issues this notification during scans. uCode clears statistics
3005 * appropriately so that each notification contains statistics for only the
3006 * one channel that has just been scanned.
3007 */
3008#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
3009#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
3010
3011struct iwl3945_notif_statistics {
3012 __le32 flag;
3013 struct iwl39_statistics_rx rx;
3014 struct iwl39_statistics_tx tx;
3015 struct iwl39_statistics_general general;
3016} __packed;
3017
3018struct iwl_notif_statistics {
3019 __le32 flag;
3020 struct statistics_rx rx;
3021 struct statistics_tx tx;
3022 struct statistics_general general;
3023} __packed;
3024
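/*
 * Illustrative sketch (not part of the driver): as described in the
 * SENSITIVITY_CMD comment further below, false_alarm_cnt and plcp_err
 * accumulate monotonically, so the per-beacon-period figure is the
 * difference against the previous notification.  "prev" is
 * caller-maintained state, an assumption of this sketch.
 */
static u32 example_ofdm_false_alarm_delta(const struct iwl_notif_statistics *cur,
					  const struct iwl_notif_statistics *prev)
{
	u32 now = le32_to_cpu(cur->rx.ofdm.false_alarm_cnt) +
		  le32_to_cpu(cur->rx.ofdm.plcp_err);
	u32 before = le32_to_cpu(prev->rx.ofdm.false_alarm_cnt) +
		     le32_to_cpu(prev->rx.ofdm.plcp_err);

	/* unsigned arithmetic keeps the delta correct across a 2^32 wrap */
	return now - before;
}
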
3025/*
3026 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
3027 *
3028 * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
3029 * missed beacons, regardless of how many were missed. The notification
3030 * carries all of the beacon information: the total number of missed beacons,
3031 * the number of consecutive missed beacons, the number of beacons received
3032 * and the number of beacons expected.
3033 *
3034 * If uCode detects consecutive_missed_beacons > 5, it resets the radio in
3035 * order to bring the radio/PHY back to a working state; this has no relation
3036 * to when the driver performs sensitivity calibration.
3037 *
3038 * The driver should set its own missed_beacon_threshold to decide when to
3039 * perform sensitivity calibration based on the number of consecutive missed
3040 * beacons, in order to improve overall performance, especially in noisy
3041 * environments.
3042 *
3043 */
3044
3045#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
3046#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
3047#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
3048
3049struct iwl_missed_beacon_notif {
3050 __le32 consecutive_missed_beacons;
3051 __le32 total_missed_becons;
3052 __le32 num_expected_beacons;
3053 __le32 num_recvd_beacons;
3054} __packed;
3055
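/*
 * Illustrative sketch (not part of the driver): deciding from the
 * notification above whether a driver-side threshold (distinct from
 * uCode's internal "> 5 resets the radio" rule) has been crossed and a
 * sensitivity recalibration should therefore be scheduled.
 */
static bool example_needs_sensitivity_recalib(const struct iwl_missed_beacon_notif *mb,
					      u32 driver_threshold)
{
	if (driver_threshold < IWL_MISSED_BEACON_THRESHOLD_MIN ||
	    driver_threshold > IWL_MISSED_BEACON_THRESHOLD_MAX)
		driver_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;

	return le32_to_cpu(mb->consecutive_missed_beacons) > driver_threshold;
}
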
3056
3057/******************************************************************************
3058 * (11)
3059 * Rx Calibration Commands:
3060 *
3061 * With the uCode used for open source drivers, most Tx calibration (except
3062 * for Tx Power) and most Rx calibration is done by uCode during the
3063 * "initialize" phase of uCode boot. Driver must calibrate only:
3064 *
3065 * 1) Tx power (depends on temperature), described elsewhere
3066 * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
3067 * 3) Receiver sensitivity (to optimize signal detection)
3068 *
3069 *****************************************************************************/
3070
3071/**
3072 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
3073 *
3074 * This command sets up the Rx signal detector for a sensitivity level that
3075 * is high enough to lock onto all signals within the associated network,
3076 * but low enough to ignore signals that are below a certain threshold, so as
3077 * not to have too many "false alarms". False alarms are signals that the
3078 * Rx DSP tries to lock onto, but then discards after determining that they
3079 * are noise.
3080 *
3081 * The optimum number of false alarms is between 5 and 50 per 200 TUs
3082 * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
3083 * time listening, not transmitting). Driver must adjust sensitivity so that
3084 * the ratio of actual false alarms to actual Rx time falls within this range.
3085 *
3086 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
3087 * received beacon. These provide information to the driver to analyze the
3088 * sensitivity. Don't analyze statistics that come in from scanning, or any
3089 * other non-associated-network source. Pertinent statistics include:
3090 *
3091 * From "general" statistics (struct statistics_rx_non_phy):
3092 *
3093 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
3094 * Measure of energy of desired signal. Used for establishing a level
3095 * below which the device does not detect signals.
3096 *
3097 * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
3098 * Measure of background noise in silent period after beacon.
3099 *
3100 * channel_load
3101 * uSecs of actual Rx time during beacon period (varies according to
3102 * how much time was spent transmitting).
3103 *
3104 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
3105 *
3106 * false_alarm_cnt
3107 * Signal locks abandoned early (before phy-level header).
3108 *
3109 * plcp_err
3110 * Signal locks abandoned late (during phy-level header).
3111 *
3112 * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
3113 * beacon to beacon, i.e. each value is an accumulation of all errors
3114 * before and including the latest beacon. Values will wrap around to 0
3115 * after counting up to 2^32 - 1. Driver must differentiate vs.
3116 * previous beacon's values to determine # false alarms in the current
3117 * beacon period.
3118 *
3119 * Total number of false alarms = false_alarms + plcp_errs
3120 *
3121 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
3122 * (notice that the start points for OFDM are at or close to settings for
3123 * maximum sensitivity):
3124 *
3125 * START / MIN / MAX
3126 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
3127 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
3128 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
3129 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
3130 *
3131 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
3132 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
3133 * by *adding* 1 to all 4 of the table entries above, up to the max for
3134 * each entry. Conversely, if false alarm rate is too low (less than 5
3135 * for each 204.8 msecs listening), *subtract* 1 from each entry to
3136 * increase sensitivity.
3137 *
3138 * For CCK sensitivity, keep track of the following:
3139 *
3140 * 1). 20-beacon history of maximum background noise, indicated by
3141 * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
3142 * 3 receivers. For any given beacon, the "silence reference" is
3143 * the maximum of last 60 samples (20 beacons * 3 receivers).
3144 *
3145 * 2). 10-beacon history of strongest signal level, as indicated
3146 * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
3147 * i.e. the strength of the signal through the best receiver at the
3148 * moment. These measurements are "upside down", with lower values
3149 * for stronger signals, so max energy will be *minimum* value.
3150 *
3151 * Then for any given beacon, the driver must determine the *weakest*
3152 * of the strongest signals; this is the minimum level that needs to be
3153 * successfully detected, when using the best receiver at the moment.
3154 * "Max cck energy" is the maximum (higher value means lower energy!)
3155 * of the last 10 minima. Once this is determined, driver must add
3156 * a little margin by adding "6" to it.
3157 *
3158 * 3). Number of consecutive beacon periods with too few false alarms.
3159 * Reset this to 0 at the first beacon period that falls within the
3160 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
3161 *
3162 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
3163 * (notice that the start points for CCK are at maximum sensitivity):
3164 *
3165 * START / MIN / MAX
3166 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
3167 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
3168 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
3169 *
3170 * If actual rate of CCK false alarms (+ plcp_errors) is too high
3171 * (greater than 50 for each 204.8 msecs listening), method for reducing
3172 * sensitivity is:
3173 *
3174 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3175 * up to max 400.
3176 *
3177 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
3178 * sensitivity has been reduced a significant amount; bring it up to
3179 * a moderate 161. Otherwise, *add* 3, up to max 200.
3180 *
3181 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
3182 * sensitivity has been reduced only a moderate or small amount;
3183 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
3184 * down to min 0. Otherwise (if gain has been significantly reduced),
3185 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
3186 *
3187 * b) Save a snapshot of the "silence reference".
3188 *
3189 * If actual rate of CCK false alarms (+ plcp_errors) is too low
3190 * (less than 5 for each 204.8 msecs listening), method for increasing
3191 * sensitivity is used only if:
3192 *
3193 * 1a) Previous beacon did not have too many false alarms
3194 * 1b) AND difference between previous "silence reference" and current
3195 * "silence reference" (prev - current) is 2 or more,
3196 * OR 2) 100 or more consecutive beacon periods have had rate of
3197 * less than 5 false alarms per 204.8 milliseconds rx time.
3198 *
3199 * Method for increasing sensitivity:
3200 *
3201 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
3202 * down to min 125.
3203 *
3204 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3205 * down to min 200.
3206 *
3207 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
3208 *
3209 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3210 * (between 5 and 50 for each 204.8 msecs listening):
3211 *
3212 * 1) Save a snapshot of the silence reference.
3213 *
3214 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3215 * give some extra margin to energy threshold by *subtracting* 8
3216 * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
3217 *
3218 * For all cases (too few, too many, good range), make sure that the CCK
3219 * detection threshold (energy) is below the energy level for robust
3220 * detection over the past 10 beacon periods, the "Max cck energy".
3221 * Lower values mean higher energy; this means making sure that the value
3222 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3223 *
3224 */
3225
3226/*
3227 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
3228 */
3229#define HD_TABLE_SIZE (11) /* number of entries */
3230#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
3231#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
3232#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
3233#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
3234#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
3235#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
3236#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
3237#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
3238#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
3239#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
3240#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
3241
3242/* Control field in struct iwl_sensitivity_cmd */
3243#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
3244#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
3245
3246/**
3247 * struct iwl_sensitivity_cmd
3248 * @control: (1) updates working table, (0) updates default table
3249 * @table: energy threshold values, use HD_* as index into table
3250 *
3251 * Always use "1" in "control" to update uCode's working table and DSP.
3252 */
3253struct iwl_sensitivity_cmd {
3254 __le16 control; /* always use "1" */
3255 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
3256} __packed;
3257
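/*
 * Illustrative sketch (not part of the driver): one step of the OFDM
 * false-alarm adjustment described above.  "false_alarms" is the
 * per-period delta of (false_alarm_cnt + plcp_err) and rx_time_usec the
 * actual listen time; "table" is assumed to be a host-order working copy
 * that is later written into iwl_sensitivity_cmd.table[] (as __le16) and
 * sent with control = SENSITIVITY_CMD_CONTROL_WORK_TABLE.  The real
 * algorithm lives in iwl-4965-calib.c; limits below follow the
 * START/MIN/MAX table in the comment above.
 */
static void example_adjust_ofdm_sensitivity(u32 *table, u32 false_alarms,
					    u32 rx_time_usec)
{
	u32 lo = 5 * rx_time_usec / 204800;	/* "good" window, scaled */
	u32 hi = 50 * rx_time_usec / 204800;	/* to the actual rx time */

	if (false_alarms > hi) {		/* too many: reduce sensitivity */
		if (table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] < 120)
			table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX]++;
		if (table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] < 140)
			table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX]++;
	} else if (false_alarms < lo) {		/* too few: increase sensitivity */
		if (table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] > 85)
			table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX]--;
		if (table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] > 105)
			table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX]--;
	}
	/* the MRC entries are adjusted the same way against their own limits */
}
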
3258
3259/**
3260 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
3261 *
3262 * This command sets the relative gains of the 4965 device's 3 radio receiver chains.
3263 *
3264 * After the first association, driver should accumulate signal and noise
3265 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
3266 * beacons from the associated network (don't collect statistics that come
3267 * in from scanning, or any other non-network source).
3268 *
3269 * DISCONNECTED ANTENNA:
3270 *
3271 * Driver should determine which antennas are actually connected, by comparing
3272 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3273 * following values over 20 beacons, one accumulator for each of the chains
3274 * a/b/c, from struct statistics_rx_non_phy:
3275 *
3276 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3277 *
3278 * Find the strongest signal from among a/b/c. Compare the other two to the
3279 * strongest. If any signal is more than 15 dB (times 20, unless you
3280 * divide the accumulated values by 20) below the strongest, the driver
3281 * considers that antenna to be disconnected, and should not try to use that
3282 * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
3283 * driver should declare the stronger one as connected, and attempt to use it
3284 * (A and B are the only 2 Tx chains!).
3285 *
3286 *
3287 * RX BALANCE:
3288 *
3289 * Driver should balance the 3 receivers (but just the ones that are connected
3290 * to antennas, see above) for gain, by comparing the average signal levels
3291 * detected during the silence after each beacon (background noise).
3292 * Accumulate (add) the following values over 20 beacons, one accumulator for
3293 * each of the chains a/b/c, from struct statistics_rx_non_phy:
3294 *
3295 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3296 *
3297 * Find the weakest background noise level from among a/b/c. This Rx chain
3298 * will be the reference, with 0 gain adjustment. Attenuate other chains by
3299 * finding noise difference:
3300 *
3301 * (accum_noise[i] - accum_noise[reference]) / 30
3302 *
3303 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3304 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
3305 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3306 * and set bit 2 to indicate "reduce gain". The value for the reference
3307 * (weakest) chain should be "0".
3308 *
3309 * diff_gain_[abc] bit fields:
3310 * 2: (1) reduce gain, (0) increase gain
3311 * 1-0: amount of gain, units of 1.5 dB
3312 */
3313
3314/* Phy calibration commands */
3315/* The default calibrate table size if not specified by firmware */
3316#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3317enum {
3318 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3319 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3320};
3321
3322#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3323
3324struct iwl_calib_hdr {
3325 u8 op_code;
3326 u8 first_group;
3327 u8 groups_num;
3328 u8 data_valid;
3329} __packed;
3330
3331/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
3332struct iwl_calib_diff_gain_cmd {
3333 struct iwl_calib_hdr hdr;
3334 s8 diff_gain_a; /* see above */
3335 s8 diff_gain_b;
3336 s8 diff_gain_c;
3337 u8 reserved1;
3338} __packed;
3339
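/*
 * Illustrative sketch (not part of the driver): turning 20-beacon noise
 * accumulators (beacon_silence_rssi_[abc] & 0x0FF, summed by the caller)
 * into the diff_gain_[abc] fields described in the RX BALANCE comment
 * above.  The real code is in iwl-4965-calib.c; the accumulator layout
 * here is an assumption of this sketch.
 */
static void example_fill_diff_gain(struct iwl_calib_diff_gain_cmd *cmd,
				   const u32 noise[3])
{
	u32 min_noise = min3(noise[0], noise[1], noise[2]);
	/* delta over 20 samples, in 1.5 dB units, clamped to 0..3 */
	u32 d_a = min((noise[0] - min_noise) / 30, 3U);
	u32 d_b = min((noise[1] - min_noise) / 30, 3U);
	u32 d_c = min((noise[2] - min_noise) / 30, 3U);

	cmd->hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;

	/* bit 2 = "reduce gain"; the weakest (reference) chain stays 0 */
	cmd->diff_gain_a = d_a ? (d_a | BIT(2)) : 0;
	cmd->diff_gain_b = d_b ? (d_b | BIT(2)) : 0;
	cmd->diff_gain_c = d_c ? (d_c | BIT(2)) : 0;
}
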
3340/******************************************************************************
3341 * (12)
3342 * Miscellaneous Commands:
3343 *
3344 *****************************************************************************/
3345
3346/*
3347 * LEDs Command & Response
3348 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
3349 *
3350 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3351 * this command turns it on or off, or sets up a periodic blinking cycle.
3352 */
3353struct iwl_led_cmd {
3354 __le32 interval; /* "interval" in uSec */
3355 u8 id; /* 1: Activity, 2: Link, 3: Tech */
3356 u8 off; /* # intervals off while blinking;
3357 * "0", with >0 "on" value, turns LED on */
3358 u8 on; /* # intervals on while blinking;
3359 * "0", regardless of "off", turns LED off */
3360 u8 reserved;
3361} __packed;
3362
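/*
 * Illustrative sketch (not part of the driver): a REPLY_LEDS_CMD payload
 * that blinks the Activity LED, on for 1 interval out of every 10.  The
 * interval value is an example assumption; iwl-led.c derives the real
 * blink patterns from throughput.
 */
static const struct iwl_led_cmd example_led_blink = {
	.interval = cpu_to_le32(1000),	/* length of one interval, in uSec */
	.id = 1,			/* 1: Activity LED */
	.on = 1,			/* 1 interval on ... */
	.off = 9,			/* ... then 9 intervals off */
};
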
3363
3364/******************************************************************************
3365 * (13)
3366 * Union of all expected notifications/responses:
3367 *
3368 *****************************************************************************/
3369
3370struct iwl_rx_packet {
3371 /*
3372 * The first 4 bytes of the RX frame header contain both the RX frame
3373 * size and some flags.
3374 * Bit fields:
3375 * 31: flag flush RB request
3376 * 30: flag ignore TC (terminal counter) request
3377 * 29: flag fast IRQ request
3378 * 28-14: Reserved
3379 * 13-00: RX frame size
3380 */
3381 __le32 len_n_flags;
3382 struct iwl_cmd_header hdr;
3383 union {
3384 struct iwl3945_rx_frame rx_frame;
3385 struct iwl3945_tx_resp tx_resp;
3386 struct iwl3945_beacon_notif beacon_status;
3387
3388 struct iwl_alive_resp alive_frame;
3389 struct iwl_spectrum_notification spectrum_notif;
3390 struct iwl_csa_notification csa_notif;
3391 struct iwl_error_resp err_resp;
3392 struct iwl_card_state_notif card_state_notif;
3393 struct iwl_add_sta_resp add_sta;
3394 struct iwl_rem_sta_resp rem_sta;
3395 struct iwl_sleep_notification sleep_notif;
3396 struct iwl_spectrum_resp spectrum;
3397 struct iwl_notif_statistics stats;
3398 struct iwl_compressed_ba_resp compressed_ba;
3399 struct iwl_missed_beacon_notif missed_beacon;
3400 __le32 status;
3401 u8 raw[0];
3402 } u;
3403} __packed;
3404
3405#endif /* __iwl_legacy_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
new file mode 100644
index 000000000000..d418b647be80
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -0,0 +1,2674 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-dev.h"
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-power.h"
42#include "iwl-sta.h"
43#include "iwl-helpers.h"
44
45
46MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/*
52 * If bt_coex_active is true, uCode will do kill/defer every time the
53 * priority line is asserted (BT is sending signals on the priority line
54 * in the PCIx).
55 * If bt_coex_active is false, uCode will ignore BT activity and perform
56 * normal operation.
57 *
58 * Users might experience transmit issues on some platforms due to a
59 * WiFi/BT co-existence problem. The possible symptoms are being able to
60 * scan and find all available APs, but not being able to associate with
61 * any AP.
62 * On those platforms, WiFi communication can be restored by setting the
63 * "bt_coex_active" module parameter to "false".
64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */
67static bool bt_coex_active = true;
68module_param(bt_coex_active, bool, S_IRUGO);
69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
70
71u32 iwlegacy_debug_level;
72EXPORT_SYMBOL(iwlegacy_debug_level);
73
74const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
75EXPORT_SYMBOL(iwlegacy_bcast_addr);
76
77
78/* This function both allocates and initializes hw and priv. */
79struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
80{
81 struct iwl_priv *priv;
82 /* mac80211 allocates memory for this device instance, including
83 * space for this driver's private structure */
84 struct ieee80211_hw *hw;
85
86 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
87 cfg->ops->ieee80211_ops);
88 if (hw == NULL) {
89 pr_err("%s: Can not allocate network device\n",
90 cfg->name);
91 goto out;
92 }
93
94 priv = hw->priv;
95 priv->hw = hw;
96
97out:
98 return hw;
99}
100EXPORT_SYMBOL(iwl_legacy_alloc_all);
101
102#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
103#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
104static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
105 struct ieee80211_sta_ht_cap *ht_info,
106 enum ieee80211_band band)
107{
108 u16 max_bit_rate = 0;
109 u8 rx_chains_num = priv->hw_params.rx_chains_num;
110 u8 tx_chains_num = priv->hw_params.tx_chains_num;
111
112 ht_info->cap = 0;
113 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
114
115 ht_info->ht_supported = true;
116
117 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
118 max_bit_rate = MAX_BIT_RATE_20_MHZ;
119 if (priv->hw_params.ht40_channel & BIT(band)) {
120 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
121 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
122 ht_info->mcs.rx_mask[4] = 0x01;
123 max_bit_rate = MAX_BIT_RATE_40_MHZ;
124 }
125
126 if (priv->cfg->mod_params->amsdu_size_8K)
127 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
128
129 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
130 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
131
132 ht_info->mcs.rx_mask[0] = 0xFF;
133 if (rx_chains_num >= 2)
134 ht_info->mcs.rx_mask[1] = 0xFF;
135 if (rx_chains_num >= 3)
136 ht_info->mcs.rx_mask[2] = 0xFF;
137
138 /* Highest supported Rx data rate */
139 max_bit_rate *= rx_chains_num;
140 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
141 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
142
143 /* Tx MCS capabilities */
144 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
145 if (tx_chains_num != rx_chains_num) {
146 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
147 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
148 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
149 }
150}
151
152/**
153 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on EEPROM data
154 */
155int iwl_legacy_init_geos(struct iwl_priv *priv)
156{
157 struct iwl_channel_info *ch;
158 struct ieee80211_supported_band *sband;
159 struct ieee80211_channel *channels;
160 struct ieee80211_channel *geo_ch;
161 struct ieee80211_rate *rates;
162 int i = 0;
163
164 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
165 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
166 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
167 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
168 return 0;
169 }
170
171 channels = kzalloc(sizeof(struct ieee80211_channel) *
172 priv->channel_count, GFP_KERNEL);
173 if (!channels)
174 return -ENOMEM;
175
176 rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
177 GFP_KERNEL);
178 if (!rates) {
179 kfree(channels);
180 return -ENOMEM;
181 }
182
183 /* 5.2GHz channels start after the 2.4GHz channels */
184 sband = &priv->bands[IEEE80211_BAND_5GHZ];
185 sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
186 /* just OFDM */
187 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
188 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
189
190 if (priv->cfg->sku & IWL_SKU_N)
191 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
192 IEEE80211_BAND_5GHZ);
193
194 sband = &priv->bands[IEEE80211_BAND_2GHZ];
195 sband->channels = channels;
196 /* OFDM & CCK */
197 sband->bitrates = rates;
198 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
199
200 if (priv->cfg->sku & IWL_SKU_N)
201 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
202 IEEE80211_BAND_2GHZ);
203
204 priv->ieee_channels = channels;
205 priv->ieee_rates = rates;
206
207 for (i = 0; i < priv->channel_count; i++) {
208 ch = &priv->channel_info[i];
209
210 if (!iwl_legacy_is_channel_valid(ch))
211 continue;
212
213 if (iwl_legacy_is_channel_a_band(ch))
214 sband = &priv->bands[IEEE80211_BAND_5GHZ];
215 else
216 sband = &priv->bands[IEEE80211_BAND_2GHZ];
217
218 geo_ch = &sband->channels[sband->n_channels++];
219
220 geo_ch->center_freq =
221 ieee80211_channel_to_frequency(ch->channel, ch->band);
222 geo_ch->max_power = ch->max_power_avg;
223 geo_ch->max_antenna_gain = 0xff;
224 geo_ch->hw_value = ch->channel;
225
226 if (iwl_legacy_is_channel_valid(ch)) {
227 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
228 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
229
230 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
231 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
232
233 if (ch->flags & EEPROM_CHANNEL_RADAR)
234 geo_ch->flags |= IEEE80211_CHAN_RADAR;
235
236 geo_ch->flags |= ch->ht40_extension_channel;
237
238 if (ch->max_power_avg > priv->tx_power_device_lmt)
239 priv->tx_power_device_lmt = ch->max_power_avg;
240 } else {
241 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
242 }
243
244 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
245 ch->channel, geo_ch->center_freq,
246 iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
247 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
248 "restricted" : "valid",
249 geo_ch->flags);
250 }
251
252 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
253 priv->cfg->sku & IWL_SKU_A) {
254 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
255 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
256 priv->pci_dev->device,
257 priv->pci_dev->subsystem_device);
258 priv->cfg->sku &= ~IWL_SKU_A;
259 }
260
261 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
262 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
263 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
264
265 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
266
267 return 0;
268}
269EXPORT_SYMBOL(iwl_legacy_init_geos);
270
271/*
272 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
273 */
274void iwl_legacy_free_geos(struct iwl_priv *priv)
275{
276 kfree(priv->ieee_channels);
277 kfree(priv->ieee_rates);
278 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
279}
280EXPORT_SYMBOL(iwl_legacy_free_geos);
281
282static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
283 enum ieee80211_band band,
284 u16 channel, u8 extension_chan_offset)
285{
286 const struct iwl_channel_info *ch_info;
287
288 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
289 if (!iwl_legacy_is_channel_valid(ch_info))
290 return false;
291
292 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
293 return !(ch_info->ht40_extension_channel &
294 IEEE80211_CHAN_NO_HT40PLUS);
295 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
296 return !(ch_info->ht40_extension_channel &
297 IEEE80211_CHAN_NO_HT40MINUS);
298
299 return false;
300}
301
302bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
303 struct iwl_rxon_context *ctx,
304 struct ieee80211_sta_ht_cap *ht_cap)
305{
306 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
307 return false;
308
309 /*
310 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
311 * the bit will not be set in the pure 40 MHz case
312 */
313 if (ht_cap && !ht_cap->ht_supported)
314 return false;
315
316#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
317 if (priv->disable_ht40)
318 return false;
319#endif
320
321 return iwl_legacy_is_channel_extension(priv, priv->band,
322 le16_to_cpu(ctx->staging.channel),
323 ctx->ht.extension_chan_offset);
324}
325EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
326
327static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
328{
329 u16 new_val;
330 u16 beacon_factor;
331
332 /*
333 * If mac80211 hasn't given us a beacon interval, program
334 * the default into the device.
335 */
336 if (!beacon_val)
337 return DEFAULT_BEACON_INTERVAL;
338
339 /*
340 * If the beacon interval we obtained from the peer
341 * is too large, we'll have to wake up more often
342 * (and in IBSS case, we'll beacon too much)
343 *
344 * For example, if max_beacon_val is 4096, and the
345 * requested beacon interval is 7000, we'll have to
346 * use 3500 to be able to wake up on the beacons.
347 *
348 * This could badly influence beacon detection stats.
349 */
350
351 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
352 new_val = beacon_val / beacon_factor;
353
354 if (!new_val)
355 new_val = max_beacon_val;
356
357 return new_val;
358}
359
360int
361iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
362{
363 u64 tsf;
364 s32 interval_tm, rem;
365 struct ieee80211_conf *conf = NULL;
366 u16 beacon_int;
367 struct ieee80211_vif *vif = ctx->vif;
368
369 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
370
371 lockdep_assert_held(&priv->mutex);
372
373 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
374
375 ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
376 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
377
378 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
379
380 /*
381 * TODO: For IBSS we need to get atim_window from mac80211,
382 * for now just always use 0
383 */
384 ctx->timing.atim_window = 0;
385
386 beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
387 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
388 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
389
390 tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
391 interval_tm = beacon_int * TIME_UNIT;
392 rem = do_div(tsf, interval_tm);
393 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
394
395 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
396
397 IWL_DEBUG_ASSOC(priv,
398 "beacon interval %d beacon timer %d beacon tim %d\n",
399 le16_to_cpu(ctx->timing.beacon_interval),
400 le32_to_cpu(ctx->timing.beacon_init_val),
401 le16_to_cpu(ctx->timing.atim_window));
402
403 return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
404 sizeof(ctx->timing), &ctx->timing);
405}
406EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
407
408void
409iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
410 struct iwl_rxon_context *ctx,
411 int hw_decrypt)
412{
413 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
414
415 if (hw_decrypt)
416 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
417 else
418 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
419
420}
421EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
422
423/* validate RXON structure is valid */
424int
425iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
426{
427 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
428 bool error = false;
429
430 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
431 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
432 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
433 error = true;
434 }
435 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
436 IWL_WARN(priv, "check 2.4G: wrong radar\n");
437 error = true;
438 }
439 } else {
440 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
441 IWL_WARN(priv, "check 5.2G: not short slot!\n");
442 error = true;
443 }
444 if (rxon->flags & RXON_FLG_CCK_MSK) {
445 IWL_WARN(priv, "check 5.2G: CCK!\n");
446 error = true;
447 }
448 }
449 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
450 IWL_WARN(priv, "mac/bssid mcast!\n");
451 error = true;
452 }
453
454 /* make sure basic rates 6Mbps and 1Mbps are supported */
455 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
456 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
457 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
458 error = true;
459 }
460
461 if (le16_to_cpu(rxon->assoc_id) > 2007) {
462 IWL_WARN(priv, "aid > 2007\n");
463 error = true;
464 }
465
466 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
467 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
468 IWL_WARN(priv, "CCK and short slot\n");
469 error = true;
470 }
471
472 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
473 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
474 IWL_WARN(priv, "CCK and auto detect");
475 error = true;
476 }
477
478 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
479 RXON_FLG_TGG_PROTECT_MSK)) ==
480 RXON_FLG_TGG_PROTECT_MSK) {
481 IWL_WARN(priv, "TGg but no auto-detect\n");
482 error = true;
483 }
484
485 if (error)
486 IWL_WARN(priv, "Tuning to channel %d\n",
487 le16_to_cpu(rxon->channel));
488
489 if (error) {
490 IWL_ERR(priv, "Invalid RXON\n");
491 return -EINVAL;
492 }
493 return 0;
494}
495EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
496
497/**
498 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
499 * @priv: staging_rxon is compared to active_rxon
500 *
501 * If the RXON structure is changing enough to require a new tune,
502 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
503 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
504 */
505int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
506 struct iwl_rxon_context *ctx)
507{
508 const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
509 const struct iwl_legacy_rxon_cmd *active = &ctx->active;
510
511#define CHK(cond) \
512 if ((cond)) { \
513 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
514 return 1; \
515 }
516
517#define CHK_NEQ(c1, c2) \
518 if ((c1) != (c2)) { \
519 IWL_DEBUG_INFO(priv, "need full RXON - " \
520 #c1 " != " #c2 " - %d != %d\n", \
521 (c1), (c2)); \
522 return 1; \
523 }
524
525 /* These items are only settable from the full RXON command */
526 CHK(!iwl_legacy_is_associated_ctx(ctx));
527 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
528 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
529 CHK(compare_ether_addr(staging->wlap_bssid_addr,
530 active->wlap_bssid_addr));
531 CHK_NEQ(staging->dev_type, active->dev_type);
532 CHK_NEQ(staging->channel, active->channel);
533 CHK_NEQ(staging->air_propagation, active->air_propagation);
534 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
535 active->ofdm_ht_single_stream_basic_rates);
536 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
537 active->ofdm_ht_dual_stream_basic_rates);
538 CHK_NEQ(staging->assoc_id, active->assoc_id);
539
540 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
541 * be updated with the RXON_ASSOC command -- however only some
542 * flag transitions are allowed using RXON_ASSOC */
543
544 /* Check if we are not switching bands */
545 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
546 active->flags & RXON_FLG_BAND_24G_MSK);
547
548 /* Check if we are switching association toggle */
549 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
550 active->filter_flags & RXON_FILTER_ASSOC_MSK);
551
552#undef CHK
553#undef CHK_NEQ
554
555 return 0;
556}
557EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
558
559u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
560 struct iwl_rxon_context *ctx)
561{
562 /*
563 * Assign the lowest rate -- should really get this from
564 * the beacon skb from mac80211.
565 */
566 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
567 return IWL_RATE_1M_PLCP;
568 else
569 return IWL_RATE_6M_PLCP;
570}
571EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
572
573static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
574 struct iwl_ht_config *ht_conf,
575 struct iwl_rxon_context *ctx)
576{
577 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
578
579 if (!ctx->ht.enabled) {
580 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
581 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
582 RXON_FLG_HT40_PROT_MSK |
583 RXON_FLG_HT_PROT_MSK);
584 return;
585 }
586
587 rxon->flags |= cpu_to_le32(ctx->ht.protection <<
588 RXON_FLG_HT_OPERATING_MODE_POS);
589
590 /* Set up channel bandwidth:
591 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
592 /* clear the HT channel mode before setting the mode */
593 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
594 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
595 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
596 /* pure ht40 */
597 if (ctx->ht.protection ==
598 IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
599 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
600 /* Note: control channel is opposite of extension channel */
601 switch (ctx->ht.extension_chan_offset) {
602 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
603 rxon->flags &=
604 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
605 break;
606 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
607 rxon->flags |=
608 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
609 break;
610 }
611 } else {
612 /* Note: control channel is opposite of extension channel */
613 switch (ctx->ht.extension_chan_offset) {
614 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
615 rxon->flags &=
616 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
617 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
618 break;
619 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
620 rxon->flags |=
621 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
622 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
623 break;
624 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
625 default:
626 /* channel location only valid if in Mixed mode */
627 IWL_ERR(priv,
628 "invalid extension channel offset\n");
629 break;
630 }
631 }
632 } else {
633 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
634 }
635
636 if (priv->cfg->ops->hcmd->set_rxon_chain)
637 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
638
639 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
640 "extension channel offset 0x%x\n",
641 le32_to_cpu(rxon->flags), ctx->ht.protection,
642 ctx->ht.extension_chan_offset);
643}
644
645void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
646{
647 struct iwl_rxon_context *ctx;
648
649 for_each_context(priv, ctx)
650 _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
651}
652EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
653
654/* Return valid, unused, channel for a passive scan to reset the RF */
655u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
656 enum ieee80211_band band)
657{
658 const struct iwl_channel_info *ch_info;
659 int i;
660 u8 channel = 0;
661 u8 min, max;
662 struct iwl_rxon_context *ctx;
663
664 if (band == IEEE80211_BAND_5GHZ) {
665 min = 14;
666 max = priv->channel_count;
667 } else {
668 min = 0;
669 max = 14;
670 }
671
672 for (i = min; i < max; i++) {
673 bool busy = false;
674
675 for_each_context(priv, ctx) {
676 busy = priv->channel_info[i].channel ==
677 le16_to_cpu(ctx->staging.channel);
678 if (busy)
679 break;
680 }
681
682 if (busy)
683 continue;
684
685 channel = priv->channel_info[i].channel;
686 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
687 if (iwl_legacy_is_channel_valid(ch_info))
688 break;
689 }
690
691 return channel;
692}
693EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
694
695/**
696 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
697 * @ch: requested channel as a pointer to struct ieee80211_channel
698 *
699 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
700 * in the staging RXON flag structure based on the ch->band
701 */
702int
703iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
704 struct iwl_rxon_context *ctx)
705{
706 enum ieee80211_band band = ch->band;
707 u16 channel = ch->hw_value;
708
709 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
710 (priv->band == band))
711 return 0;
712
713 ctx->staging.channel = cpu_to_le16(channel);
714 if (band == IEEE80211_BAND_5GHZ)
715 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
716 else
717 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
718
719 priv->band = band;
720
721 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
722
723 return 0;
724}
725EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
726
727void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
728 struct iwl_rxon_context *ctx,
729 enum ieee80211_band band,
730 struct ieee80211_vif *vif)
731{
732 if (band == IEEE80211_BAND_5GHZ) {
733 ctx->staging.flags &=
734 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
735 | RXON_FLG_CCK_MSK);
736 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
737 } else {
738 /* Copied from iwl_post_associate() */
739 if (vif && vif->bss_conf.use_short_slot)
740 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
741 else
742 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
743
744 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
745 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
746 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
747 }
748}
749EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
750
751/*
752 * initialize rxon structure with default values from eeprom
753 */
754void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
755 struct iwl_rxon_context *ctx)
756{
757 const struct iwl_channel_info *ch_info;
758
759 memset(&ctx->staging, 0, sizeof(ctx->staging));
760
761 if (!ctx->vif) {
762 ctx->staging.dev_type = ctx->unused_devtype;
763 } else
764 switch (ctx->vif->type) {
765
766 case NL80211_IFTYPE_STATION:
767 ctx->staging.dev_type = ctx->station_devtype;
768 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
769 break;
770
771 case NL80211_IFTYPE_ADHOC:
772 ctx->staging.dev_type = ctx->ibss_devtype;
773 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
774 ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
775 RXON_FILTER_ACCEPT_GRP_MSK;
776 break;
777
778 default:
779 IWL_ERR(priv, "Unsupported interface type %d\n",
780 ctx->vif->type);
781 break;
782 }
783
784#if 0
785 /* TODO: Figure out when short_preamble would be set and cache from
786 * that */
787 if (!hw_to_local(priv->hw)->short_preamble)
788 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
789 else
790 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
791#endif
792
793 ch_info = iwl_legacy_get_channel_info(priv, priv->band,
794 le16_to_cpu(ctx->active.channel));
795
796 if (!ch_info)
797 ch_info = &priv->channel_info[0];
798
799 ctx->staging.channel = cpu_to_le16(ch_info->channel);
800 priv->band = ch_info->band;
801
802 iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
803
804 ctx->staging.ofdm_basic_rates =
805 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
806 ctx->staging.cck_basic_rates =
807 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
808
809 /* clear both MIX and PURE40 mode flag */
810 ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
811 RXON_FLG_CHANNEL_MODE_PURE_40);
812 if (ctx->vif)
813 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
814
815 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
816 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
817}
818EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
819
820void iwl_legacy_set_rate(struct iwl_priv *priv)
821{
822 const struct ieee80211_supported_band *hw = NULL;
823 struct ieee80211_rate *rate;
824 struct iwl_rxon_context *ctx;
825 int i;
826
827 hw = iwl_get_hw_mode(priv, priv->band);
828 if (!hw) {
829 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
830 return;
831 }
832
833 priv->active_rate = 0;
834
835 for (i = 0; i < hw->n_bitrates; i++) {
836 rate = &(hw->bitrates[i]);
837 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
838 priv->active_rate |= (1 << rate->hw_value);
839 }
840
841 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
842
843 for_each_context(priv, ctx) {
844 ctx->staging.cck_basic_rates =
845 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
846
847 ctx->staging.ofdm_basic_rates =
848 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
849 }
850}
851EXPORT_SYMBOL(iwl_legacy_set_rate);
852
853void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
854{
855 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
856
857 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
858 return;
859
860 if (priv->switch_rxon.switch_in_progress) {
861 ieee80211_chswitch_done(ctx->vif, is_success);
862 mutex_lock(&priv->mutex);
863 priv->switch_rxon.switch_in_progress = false;
864 mutex_unlock(&priv->mutex);
865 }
866}
867EXPORT_SYMBOL(iwl_legacy_chswitch_done);
868
869void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
870{
871 struct iwl_rx_packet *pkt = rxb_addr(rxb);
872 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
873
874 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
875 struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
876
877 if (priv->switch_rxon.switch_in_progress) {
878 if (!le32_to_cpu(csa->status) &&
879 (csa->channel == priv->switch_rxon.channel)) {
880 rxon->channel = csa->channel;
881 ctx->staging.channel = csa->channel;
882 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
883 le16_to_cpu(csa->channel));
884 iwl_legacy_chswitch_done(priv, true);
885 } else {
886 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
887 le16_to_cpu(csa->channel));
888 iwl_legacy_chswitch_done(priv, false);
889 }
890 }
891}
892EXPORT_SYMBOL(iwl_legacy_rx_csa);
893
894#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
895void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
896 struct iwl_rxon_context *ctx)
897{
898 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
899
900 IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
901 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
902 IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
903 le16_to_cpu(rxon->channel));
904 IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
905 IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
906 le32_to_cpu(rxon->filter_flags));
907 IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
908 IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
909 rxon->ofdm_basic_rates);
910 IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
911 rxon->cck_basic_rates);
912 IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
913 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
914 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
915 le16_to_cpu(rxon->assoc_id));
916}
917EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
918#endif
919/**
920 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
921 */
922void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
923{
924 /* Set the FW error flag -- cleared on iwl_down */
925 set_bit(STATUS_FW_ERROR, &priv->status);
926
927 /* Cancel currently queued command. */
928 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
929
930 IWL_ERR(priv, "Loaded firmware version: %s\n",
931 priv->hw->wiphy->fw_version);
932
933 priv->cfg->ops->lib->dump_nic_error_log(priv);
934 if (priv->cfg->ops->lib->dump_fh)
935 priv->cfg->ops->lib->dump_fh(priv, NULL, false);
936 priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
937#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
938 if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
939 iwl_legacy_print_rx_config_cmd(priv,
940 &priv->contexts[IWL_RXON_CTX_BSS]);
941#endif
942
943 wake_up_interruptible(&priv->wait_command_queue);
944
945 /* Keep the restart process from trying to send host
946 * commands by clearing the INIT status bit */
947 clear_bit(STATUS_READY, &priv->status);
948
949 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
950 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
951 "Restarting adapter due to uCode error.\n");
952
953 if (priv->cfg->mod_params->restart_fw)
954 queue_work(priv->workqueue, &priv->restart);
955 }
956}
957EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
958
959static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
960{
961 int ret = 0;
962
963 /* stop device's busmaster DMA activity */
964 iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
965
966 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
967 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
968 if (ret)
969 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
970
971 IWL_DEBUG_INFO(priv, "stop master\n");
972
973 return ret;
974}
975
976void iwl_legacy_apm_stop(struct iwl_priv *priv)
977{
978 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
979
980 /* Stop device's DMA activity */
981 iwl_legacy_apm_stop_master(priv);
982
983 /* Reset the entire device */
984 iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
985
986 udelay(10);
987
988 /*
989 * Clear "initialization complete" bit to move adapter from
990 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
991 */
992 iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
993 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
994}
995EXPORT_SYMBOL(iwl_legacy_apm_stop);
996
997
998/*
999 * Start up NIC's basic functionality after it has been reset
1000 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
1001 * NOTE: This does not load uCode nor start the embedded processor
1002 */
1003int iwl_legacy_apm_init(struct iwl_priv *priv)
1004{
1005 int ret = 0;
1006 u16 lctl;
1007
1008 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1009
1010 /*
1011 * Use "set_bit" below rather than "write", to preserve any hardware
1012 * bits already set by default after reset.
1013 */
1014
1015 /* Disable L0S exit timer (platform NMI Work/Around) */
1016 iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1017 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1018
1019 /*
1020 * Disable L0s without affecting L1;
1021 * don't wait for ICH L0s (ICH bug W/A)
1022 */
1023 iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1024 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1025
1026 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1027 iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
1028 CSR_DBG_HPET_MEM_REG_VAL);
1029
1030 /*
1031 * Enable HAP INTA (interrupt from management bus) to
1032 * wake device's PCI Express link L1a -> L0s
1033 * NOTE: This is a no-op for 3945 (non-existent bit)
1034 */
1035 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1036 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1037
1038 /*
1039 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1040 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1041 * If so (likely), disable L0S, so device moves directly L0->L1;
1042 * this costs a negligible amount of power savings.
1043 * If not (unlikely), enable L0S, so there is at least some
1044 * power savings, even without L1.
1045 */
1046 if (priv->cfg->base_params->set_l0s) {
1047 lctl = iwl_legacy_pcie_link_ctl(priv);
1048 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1049 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1050 /* L1-ASPM enabled; disable(!) L0S */
1051 iwl_legacy_set_bit(priv, CSR_GIO_REG,
1052 CSR_GIO_REG_VAL_L0S_ENABLED);
1053 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1054 } else {
1055 /* L1-ASPM disabled; enable(!) L0S */
1056 iwl_legacy_clear_bit(priv, CSR_GIO_REG,
1057 CSR_GIO_REG_VAL_L0S_ENABLED);
1058 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1059 }
1060 }
1061
1062 /* Configure analog phase-lock-loop before activating to D0A */
1063 if (priv->cfg->base_params->pll_cfg_val)
1064 iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
1065 priv->cfg->base_params->pll_cfg_val);
1066
1067 /*
1068 * Set "initialization complete" bit to move adapter from
1069 * D0U* --> D0A* (powered-up active) state.
1070 */
1071 iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1072
1073 /*
1074 * Wait for clock stabilization; once stabilized, access to
1075 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
1076 * and accesses to uCode SRAM.
1077 */
1078 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1079 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1080 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1081 if (ret < 0) {
1082 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1083 goto out;
1084 }
1085
1086 /*
1087 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1088 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
1089 *
1090 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1091 * do not disable clocks. This preserves any hardware bits already
1092 * set by default in "CLK_CTRL_REG" after reset.
1093 */
1094 if (priv->cfg->base_params->use_bsm)
1095 iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
1096 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1097 else
1098 iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
1099 APMG_CLK_VAL_DMA_CLK_RQT);
1100 udelay(20);
1101
1102 /* Disable L1-Active */
1103 iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1104 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1105
1106out:
1107 return ret;
1108}
1109EXPORT_SYMBOL(iwl_legacy_apm_init);
1110
1111
1112int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1113{
1114 int ret;
1115 s8 prev_tx_power;
1116 bool defer;
1117 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1118
1119 lockdep_assert_held(&priv->mutex);
1120
1121 if (priv->tx_power_user_lmt == tx_power && !force)
1122 return 0;
1123
1124 if (!priv->cfg->ops->lib->send_tx_power)
1125 return -EOPNOTSUPP;
1126
1127 if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
1128 IWL_WARN(priv,
1129 "Requested user TXPOWER %d below lower limit %d.\n",
1130 tx_power,
1131 IWL4965_TX_POWER_TARGET_POWER_MIN);
1132 return -EINVAL;
1133 }
1134
1135 if (tx_power > priv->tx_power_device_lmt) {
1136 IWL_WARN(priv,
1137 "Requested user TXPOWER %d above upper limit %d.\n",
1138 tx_power, priv->tx_power_device_lmt);
1139 return -EINVAL;
1140 }
1141
1142 if (!iwl_legacy_is_ready_rf(priv))
1143 return -EIO;
1144
1145 /* scan complete and commit_rxon use the tx_power_next value;
1146 * it always needs to be updated with the newest request */
1147 priv->tx_power_next = tx_power;
1148
1149 /* do not set tx power when scanning or channel changing */
1150 defer = test_bit(STATUS_SCANNING, &priv->status) ||
1151 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
1152 if (defer && !force) {
1153 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
1154 return 0;
1155 }
1156
1157 prev_tx_power = priv->tx_power_user_lmt;
1158 priv->tx_power_user_lmt = tx_power;
1159
1160 ret = priv->cfg->ops->lib->send_tx_power(priv);
1161
1162 /* if we fail to set tx_power, restore the original tx power */
1163 if (ret) {
1164 priv->tx_power_user_lmt = prev_tx_power;
1165 priv->tx_power_next = prev_tx_power;
1166 }
1167 return ret;
1168}
1169EXPORT_SYMBOL(iwl_legacy_set_tx_power);
1170
1171void iwl_legacy_send_bt_config(struct iwl_priv *priv)
1172{
1173 struct iwl_bt_cmd bt_cmd = {
1174 .lead_time = BT_LEAD_TIME_DEF,
1175 .max_kill = BT_MAX_KILL_DEF,
1176 .kill_ack_mask = 0,
1177 .kill_cts_mask = 0,
1178 };
1179
1180 if (!bt_coex_active)
1181 bt_cmd.flags = BT_COEX_DISABLE;
1182 else
1183 bt_cmd.flags = BT_COEX_ENABLE;
1184
1185 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1186 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1187
1188 if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1189 sizeof(struct iwl_bt_cmd), &bt_cmd))
1190 IWL_ERR(priv, "failed to send BT Coex Config\n");
1191}
1192EXPORT_SYMBOL(iwl_legacy_send_bt_config);
1193
1194int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1195{
1196 struct iwl_statistics_cmd statistics_cmd = {
1197 .configuration_flags =
1198 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1199 };
1200
1201 if (flags & CMD_ASYNC)
1202 return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1203 sizeof(struct iwl_statistics_cmd),
1204 &statistics_cmd, NULL);
1205 else
1206 return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1207 sizeof(struct iwl_statistics_cmd),
1208 &statistics_cmd);
1209}
1210EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
1211
1212void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
1213 struct iwl_rx_mem_buffer *rxb)
1214{
1215#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1217 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
1218 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
1219 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1220#endif
1221}
1222EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
1223
1224void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1225 struct iwl_rx_mem_buffer *rxb)
1226{
1227 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1228 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1229 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1230 "notification for %s:\n", len,
1231 iwl_legacy_get_cmd_string(pkt->hdr.cmd));
1232 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1233}
1234EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
1235
1236void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
1237 struct iwl_rx_mem_buffer *rxb)
1238{
1239 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1240
1241 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
1242 "seq 0x%04X ser 0x%08X\n",
1243 le32_to_cpu(pkt->u.err_resp.error_type),
1244 iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
1245 pkt->u.err_resp.cmd_id,
1246 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
1247 le32_to_cpu(pkt->u.err_resp.error_info));
1248}
1249EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
1250
1251void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
1252{
1253 memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
1254}
1255
1256int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1257 const struct ieee80211_tx_queue_params *params)
1258{
1259 struct iwl_priv *priv = hw->priv;
1260 struct iwl_rxon_context *ctx;
1261 unsigned long flags;
1262 int q;
1263
1264 IWL_DEBUG_MAC80211(priv, "enter\n");
1265
1266 if (!iwl_legacy_is_ready_rf(priv)) {
1267 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1268 return -EIO;
1269 }
1270
1271 if (queue >= AC_NUM) {
1272 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1273 return 0;
1274 }
1275
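	/*
	 * mac80211 numbers its queues from highest priority (0 = VO) down;
	 * the uCode QoS parameter array is assumed to use the reverse
	 * ordering, hence the index inversion below.
	 */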
1276 q = AC_NUM - 1 - queue;
1277
1278 spin_lock_irqsave(&priv->lock, flags);
1279
1280 for_each_context(priv, ctx) {
1281 ctx->qos_data.def_qos_parm.ac[q].cw_min =
1282 cpu_to_le16(params->cw_min);
1283 ctx->qos_data.def_qos_parm.ac[q].cw_max =
1284 cpu_to_le16(params->cw_max);
1285 ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
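		/* params->txop is expressed in units of 32 usec (mac80211
		 * convention); the multiply by 32 below converts it to usec. */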
1286 ctx->qos_data.def_qos_parm.ac[q].edca_txop =
1287 cpu_to_le16((params->txop * 32));
1288
1289 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1290 }
1291
1292 spin_unlock_irqrestore(&priv->lock, flags);
1293
1294 IWL_DEBUG_MAC80211(priv, "leave\n");
1295 return 0;
1296}
1297EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
1298
1299int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
1300{
1301 struct iwl_priv *priv = hw->priv;
1302
1303 return priv->ibss_manager == IWL_IBSS_MANAGER;
1304}
1305EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
1306
1307static int
1308iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1309{
1310 iwl_legacy_connection_init_rx_config(priv, ctx);
1311
1312 if (priv->cfg->ops->hcmd->set_rxon_chain)
1313 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1314
1315 return iwl_legacy_commit_rxon(priv, ctx);
1316}
1317
1318static int iwl_legacy_setup_interface(struct iwl_priv *priv,
1319 struct iwl_rxon_context *ctx)
1320{
1321 struct ieee80211_vif *vif = ctx->vif;
1322 int err;
1323
1324 lockdep_assert_held(&priv->mutex);
1325
1326 /*
1327 * This variable will be correct only when there's just
1328 * a single context, but all code using it is for hardware
1329 * that supports only one context.
1330 */
1331 priv->iw_mode = vif->type;
1332
1333 ctx->is_active = true;
1334
1335 err = iwl_legacy_set_mode(priv, ctx);
1336 if (err) {
1337 if (!ctx->always_active)
1338 ctx->is_active = false;
1339 return err;
1340 }
1341
1342 return 0;
1343}
1344
1345int
1346iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1347{
1348 struct iwl_priv *priv = hw->priv;
1349 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1350 struct iwl_rxon_context *tmp, *ctx = NULL;
1351 int err;
1352
1353 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1354 vif->type, vif->addr);
1355
1356 mutex_lock(&priv->mutex);
1357
1358 if (!iwl_legacy_is_ready_rf(priv)) {
1359 IWL_WARN(priv, "Try to add interface when device not ready\n");
1360 err = -EINVAL;
1361 goto out;
1362 }
1363
1364 for_each_context(priv, tmp) {
1365 u32 possible_modes =
1366 tmp->interface_modes | tmp->exclusive_interface_modes;
1367
1368 if (tmp->vif) {
1369 /* check if this busy context is exclusive */
1370 if (tmp->exclusive_interface_modes &
1371 BIT(tmp->vif->type)) {
1372 err = -EINVAL;
1373 goto out;
1374 }
1375 continue;
1376 }
1377
1378 if (!(possible_modes & BIT(vif->type)))
1379 continue;
1380
1381 /* found a possibly usable context without an interface */
1382 ctx = tmp;
1383 break;
1384 }
1385
1386 if (!ctx) {
1387 err = -EOPNOTSUPP;
1388 goto out;
1389 }
1390
1391 vif_priv->ctx = ctx;
1392 ctx->vif = vif;
1393
1394 err = iwl_legacy_setup_interface(priv, ctx);
1395 if (!err)
1396 goto out;
1397
1398 ctx->vif = NULL;
1399 priv->iw_mode = NL80211_IFTYPE_STATION;
1400 out:
1401 mutex_unlock(&priv->mutex);
1402
1403 IWL_DEBUG_MAC80211(priv, "leave\n");
1404 return err;
1405}
1406EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
1407
1408static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
1409 struct ieee80211_vif *vif,
1410 bool mode_change)
1411{
1412 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1413
1414 lockdep_assert_held(&priv->mutex);
1415
1416 if (priv->scan_vif == vif) {
1417 iwl_legacy_scan_cancel_timeout(priv, 200);
1418 iwl_legacy_force_scan_end(priv);
1419 }
1420
1421 if (!mode_change) {
1422 iwl_legacy_set_mode(priv, ctx);
1423 if (!ctx->always_active)
1424 ctx->is_active = false;
1425 }
1426}
1427
1428void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
1429 struct ieee80211_vif *vif)
1430{
1431 struct iwl_priv *priv = hw->priv;
1432 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1433
1434 IWL_DEBUG_MAC80211(priv, "enter\n");
1435
1436 mutex_lock(&priv->mutex);
1437
1438 WARN_ON(ctx->vif != vif);
1439 ctx->vif = NULL;
1440
1441 iwl_legacy_teardown_interface(priv, vif, false);
1442
1443 memset(priv->bssid, 0, ETH_ALEN);
1444 mutex_unlock(&priv->mutex);
1445
1446 IWL_DEBUG_MAC80211(priv, "leave\n");
1447
1448}
1449EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
1450
1451int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
1452{
1453 if (!priv->txq)
1454 priv->txq = kzalloc(
1455 sizeof(struct iwl_tx_queue) *
1456 priv->cfg->base_params->num_of_queues,
1457 GFP_KERNEL);
1458 if (!priv->txq) {
1459 IWL_ERR(priv, "Not enough memory for txq\n");
1460 return -ENOMEM;
1461 }
1462 return 0;
1463}
1464EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
1465
1466void iwl_legacy_txq_mem(struct iwl_priv *priv)
1467{
1468 kfree(priv->txq);
1469 priv->txq = NULL;
1470}
1471EXPORT_SYMBOL(iwl_legacy_txq_mem);
1472
1473#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1474
1475#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1476
1477void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
1478{
1479 priv->tx_traffic_idx = 0;
1480 priv->rx_traffic_idx = 0;
1481 if (priv->tx_traffic)
1482 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1483 if (priv->rx_traffic)
1484 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1485}
1486
1487int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
1488{
1489 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1490
1491 if (iwlegacy_debug_level & IWL_DL_TX) {
1492 if (!priv->tx_traffic) {
1493 priv->tx_traffic =
1494 kzalloc(traffic_size, GFP_KERNEL);
1495 if (!priv->tx_traffic)
1496 return -ENOMEM;
1497 }
1498 }
1499 if (iwlegacy_debug_level & IWL_DL_RX) {
1500 if (!priv->rx_traffic) {
1501 priv->rx_traffic =
1502 kzalloc(traffic_size, GFP_KERNEL);
1503 if (!priv->rx_traffic)
1504 return -ENOMEM;
1505 }
1506 }
1507 iwl_legacy_reset_traffic_log(priv);
1508 return 0;
1509}
1510EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
1511
1512void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
1513{
1514 kfree(priv->tx_traffic);
1515 priv->tx_traffic = NULL;
1516
1517 kfree(priv->rx_traffic);
1518 priv->rx_traffic = NULL;
1519}
1520EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
1521
1522void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
1523 u16 length, struct ieee80211_hdr *header)
1524{
1525 __le16 fc;
1526 u16 len;
1527
1528 if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
1529 return;
1530
1531 if (!priv->tx_traffic)
1532 return;
1533
1534 fc = header->frame_control;
1535 if (ieee80211_is_data(fc)) {
1536 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1537 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1538 memcpy((priv->tx_traffic +
1539 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1540 header, len);
1541 priv->tx_traffic_idx =
1542 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1543 }
1544}
1545EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
1546
1547void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
1548 u16 length, struct ieee80211_hdr *header)
1549{
1550 __le16 fc;
1551 u16 len;
1552
1553 if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
1554 return;
1555
1556 if (!priv->rx_traffic)
1557 return;
1558
1559 fc = header->frame_control;
1560 if (ieee80211_is_data(fc)) {
1561 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1562 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1563 memcpy((priv->rx_traffic +
1564 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1565 header, len);
1566 priv->rx_traffic_idx =
1567 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1568 }
1569}
1570EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
1571
1572const char *iwl_legacy_get_mgmt_string(int cmd)
1573{
1574 switch (cmd) {
1575 IWL_CMD(MANAGEMENT_ASSOC_REQ);
1576 IWL_CMD(MANAGEMENT_ASSOC_RESP);
1577 IWL_CMD(MANAGEMENT_REASSOC_REQ);
1578 IWL_CMD(MANAGEMENT_REASSOC_RESP);
1579 IWL_CMD(MANAGEMENT_PROBE_REQ);
1580 IWL_CMD(MANAGEMENT_PROBE_RESP);
1581 IWL_CMD(MANAGEMENT_BEACON);
1582 IWL_CMD(MANAGEMENT_ATIM);
1583 IWL_CMD(MANAGEMENT_DISASSOC);
1584 IWL_CMD(MANAGEMENT_AUTH);
1585 IWL_CMD(MANAGEMENT_DEAUTH);
1586 IWL_CMD(MANAGEMENT_ACTION);
1587 default:
1588 return "UNKNOWN";
1589
1590 }
1591}
1592
1593const char *iwl_legacy_get_ctrl_string(int cmd)
1594{
1595 switch (cmd) {
1596 IWL_CMD(CONTROL_BACK_REQ);
1597 IWL_CMD(CONTROL_BACK);
1598 IWL_CMD(CONTROL_PSPOLL);
1599 IWL_CMD(CONTROL_RTS);
1600 IWL_CMD(CONTROL_CTS);
1601 IWL_CMD(CONTROL_ACK);
1602 IWL_CMD(CONTROL_CFEND);
1603 IWL_CMD(CONTROL_CFENDACK);
1604 default:
1605 return "UNKNOWN";
1606
1607 }
1608}
1609
1610void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
1611{
1612 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1613 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1614}
1615
1616/*
1617 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, the
1618 * iwl_legacy_update_stats function will
1619 * record all MGMT, CTRL and DATA packets for both the TX and RX paths.
1620 * Use debugfs to display the tx/rx statistics.
1621 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, then no MGMT and CTRL
1622 * information will be recorded, but DATA packets will still be recorded,
1623 * because iwl-led.c needs to control the LED blinking based on the
1624 * number of tx and rx data frames.
1625 *
1626 */
1627void
1628iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1629{
1630 struct traffic_stats *stats;
1631
1632 if (is_tx)
1633 stats = &priv->tx_stats;
1634 else
1635 stats = &priv->rx_stats;
1636
1637 if (ieee80211_is_mgmt(fc)) {
1638 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1639 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
1640 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
1641 break;
1642 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
1643 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
1644 break;
1645 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
1646 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
1647 break;
1648 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
1649 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
1650 break;
1651 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
1652 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
1653 break;
1654 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
1655 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
1656 break;
1657 case cpu_to_le16(IEEE80211_STYPE_BEACON):
1658 stats->mgmt[MANAGEMENT_BEACON]++;
1659 break;
1660 case cpu_to_le16(IEEE80211_STYPE_ATIM):
1661 stats->mgmt[MANAGEMENT_ATIM]++;
1662 break;
1663 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
1664 stats->mgmt[MANAGEMENT_DISASSOC]++;
1665 break;
1666 case cpu_to_le16(IEEE80211_STYPE_AUTH):
1667 stats->mgmt[MANAGEMENT_AUTH]++;
1668 break;
1669 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
1670 stats->mgmt[MANAGEMENT_DEAUTH]++;
1671 break;
1672 case cpu_to_le16(IEEE80211_STYPE_ACTION):
1673 stats->mgmt[MANAGEMENT_ACTION]++;
1674 break;
1675 }
1676 } else if (ieee80211_is_ctl(fc)) {
1677 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1678 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
1679 stats->ctrl[CONTROL_BACK_REQ]++;
1680 break;
1681 case cpu_to_le16(IEEE80211_STYPE_BACK):
1682 stats->ctrl[CONTROL_BACK]++;
1683 break;
1684 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
1685 stats->ctrl[CONTROL_PSPOLL]++;
1686 break;
1687 case cpu_to_le16(IEEE80211_STYPE_RTS):
1688 stats->ctrl[CONTROL_RTS]++;
1689 break;
1690 case cpu_to_le16(IEEE80211_STYPE_CTS):
1691 stats->ctrl[CONTROL_CTS]++;
1692 break;
1693 case cpu_to_le16(IEEE80211_STYPE_ACK):
1694 stats->ctrl[CONTROL_ACK]++;
1695 break;
1696 case cpu_to_le16(IEEE80211_STYPE_CFEND):
1697 stats->ctrl[CONTROL_CFEND]++;
1698 break;
1699 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
1700 stats->ctrl[CONTROL_CFENDACK]++;
1701 break;
1702 }
1703 } else {
1704 /* data */
1705 stats->data_cnt++;
1706 stats->data_bytes += len;
1707 }
1708}
1709EXPORT_SYMBOL(iwl_legacy_update_stats);
1710#endif
1711
1712static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
1713{
1714 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1715 return;
1716
1717 if (!iwl_legacy_is_any_associated(priv)) {
1718 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
1719 return;
1720 }
1721 /*
1722 * There is no easy, better way to force a radio reset; the only
1723 * known method is switching the channel, which forces the radio
1724 * to reset and retune.
1725 * An internal short scan (single channel) operation is used to
1726 * achieve this objective.
1727 * The driver should reset the radio after a number of consecutive
1728 * missed beacons, or when any other uCode error condition is detected.
1729 */
1730 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
1731 iwl_legacy_internal_short_hw_scan(priv);
1732}
1733
1734
1735int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
1736{
1737 struct iwl_force_reset *force_reset;
1738
1739 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1740 return -EINVAL;
1741
1742 if (mode >= IWL_MAX_FORCE_RESET) {
1743 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
1744 return -EINVAL;
1745 }
1746 force_reset = &priv->force_reset[mode];
1747 force_reset->reset_request_count++;
1748 if (!external) {
1749 if (force_reset->last_force_reset_jiffies &&
1750 time_after(force_reset->last_force_reset_jiffies +
1751 force_reset->reset_duration, jiffies)) {
1752 IWL_DEBUG_INFO(priv, "force reset rejected\n");
1753 force_reset->reset_reject_count++;
1754 return -EAGAIN;
1755 }
1756 }
1757 force_reset->reset_success_count++;
1758 force_reset->last_force_reset_jiffies = jiffies;
1759 IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
1760 switch (mode) {
1761 case IWL_RF_RESET:
1762 _iwl_legacy_force_rf_reset(priv);
1763 break;
1764 case IWL_FW_RESET:
1765 /*
1766 * If the request comes from external code (e.g. debugfs),
1767 * always perform it, regardless of the module
1768 * parameter setting.
1769 * If the request is internal (uCode error or driver-detected
1770 * failure), the fw_restart module parameter must be
1771 * checked before reloading the firmware.
1772 */
1773 if (!external && !priv->cfg->mod_params->restart_fw) {
1774 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
1775 "module parameter setting\n");
1776 break;
1777 }
1778 IWL_ERR(priv, "On demand firmware reload\n");
1779 /* Set the FW error flag -- cleared on iwl_down */
1780 set_bit(STATUS_FW_ERROR, &priv->status);
1781 wake_up_interruptible(&priv->wait_command_queue);
1782 /*
1783 * Keep the restart process from trying to send host
1784 * commands by clearing the INIT status bit
1785 */
1786 clear_bit(STATUS_READY, &priv->status);
1787 queue_work(priv->workqueue, &priv->restart);
1788 break;
1789 }
1790 return 0;
1791}
1792
1793int
1794iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
1795 struct ieee80211_vif *vif,
1796 enum nl80211_iftype newtype, bool newp2p)
1797{
1798 struct iwl_priv *priv = hw->priv;
1799 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1800 struct iwl_rxon_context *tmp;
1801 u32 interface_modes;
1802 int err;
1803
1804 newtype = ieee80211_iftype_p2p(newtype, newp2p);
1805
1806 mutex_lock(&priv->mutex);
1807
1808 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1809
1810 if (!(interface_modes & BIT(newtype))) {
1811 err = -EBUSY;
1812 goto out;
1813 }
1814
1815 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1816 for_each_context(priv, tmp) {
1817 if (ctx == tmp)
1818 continue;
1819
1820 if (!tmp->vif)
1821 continue;
1822
1823 /*
1824 * The current mode switch would be exclusive, but
1825 * another context is active ... refuse the switch.
1826 */
1827 err = -EBUSY;
1828 goto out;
1829 }
1830 }
1831
1832 /* success */
1833 iwl_legacy_teardown_interface(priv, vif, true);
1834 vif->type = newtype;
1835 err = iwl_legacy_setup_interface(priv, ctx);
1836 WARN_ON(err);
1837 /*
1838 * We've switched internally, but submitting to the
1839 * device may have failed for some reason. Mask this
1840 * error, because otherwise mac80211 will not switch
1841 * (and set the interface type back) and we'll be
1842 * out of sync with it.
1843 */
1844 err = 0;
1845
1846 out:
1847 mutex_unlock(&priv->mutex);
1848 return err;
1849}
1850EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
1851
1852/*
1853 * On every watchdog tick we check the latest time stamp. If it does not
1854 * change during the timeout period and the queue is not empty, we reset the firmware.
1855 */
1856static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
1857{
1858 struct iwl_tx_queue *txq = &priv->txq[cnt];
1859 struct iwl_queue *q = &txq->q;
1860 unsigned long timeout;
1861 int ret;
1862
1863 if (q->read_ptr == q->write_ptr) {
1864 txq->time_stamp = jiffies;
1865 return 0;
1866 }
1867
1868 timeout = txq->time_stamp +
1869 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1870
1871 if (time_after(jiffies, timeout)) {
1872 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1873 q->id, priv->cfg->base_params->wd_timeout);
1874 ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
1875 return (ret == -EAGAIN) ? 0 : 1;
1876 }
1877
1878 return 0;
1879}
1880
1881/*
1882 * Making the watchdog tick a quarter of the timeout ensures we will
1883 * discover a hung queue between timeout and 1.25*timeout
1884 */
1885#define IWL_WD_TICK(timeout) ((timeout) / 4)
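/*
 * Worked example with illustrative numbers: for wd_timeout == 2000 ms the
 * watchdog fires every IWL_WD_TICK(2000) == 500 ms.  A queue whose
 * time_stamp stops advancing just after a check is declared stuck on the
 * first tick past time_stamp + 2000 ms, i.e. no later than about
 * 2500 ms == 1.25 * timeout after it hung.
 */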
1886
1887/*
1888 * Watchdog timer callback: we check each tx queue for a stuck condition; if one
1889 * is hung, we reset the firmware. If everything is fine, just rearm the timer.
1890 */
1891void iwl_legacy_bg_watchdog(unsigned long data)
1892{
1893 struct iwl_priv *priv = (struct iwl_priv *)data;
1894 int cnt;
1895 unsigned long timeout;
1896
1897 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1898 return;
1899
1900 timeout = priv->cfg->base_params->wd_timeout;
1901 if (timeout == 0)
1902 return;
1903
1904 /* monitor and check for stuck cmd queue */
1905 if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
1906 return;
1907
1908 /* monitor and check for other stuck queues */
1909 if (iwl_legacy_is_any_associated(priv)) {
1910 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1911 /* skip as we already checked the command queue */
1912 if (cnt == priv->cmd_queue)
1913 continue;
1914 if (iwl_legacy_check_stuck_queue(priv, cnt))
1915 return;
1916 }
1917 }
1918
1919 mod_timer(&priv->watchdog, jiffies +
1920 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1921}
1922EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
1923
1924void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
1925{
1926 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1927
1928 if (timeout)
1929 mod_timer(&priv->watchdog,
1930 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1931 else
1932 del_timer(&priv->watchdog);
1933}
1934EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
1935
1936/*
1937 * extended beacon time format
1938 * a time in usec is converted into a 32-bit value in extended:internal format:
1939 * the extended part is the beacon count,
1940 * the internal part is the time in usec within one beacon interval
1941 */
1942u32
1943iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
1944 u32 usec, u32 beacon_interval)
1945{
1946 u32 quot;
1947 u32 rem;
1948 u32 interval = beacon_interval * TIME_UNIT;
1949
1950 if (!interval || !usec)
1951 return 0;
1952
1953 quot = (usec / interval) &
1954 (iwl_legacy_beacon_time_mask_high(priv,
1955 priv->hw_params.beacon_time_tsf_bits) >>
1956 priv->hw_params.beacon_time_tsf_bits);
1957 rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
1958 priv->hw_params.beacon_time_tsf_bits);
1959
1960 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
1961}
1962EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
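/*
 * Worked example with illustrative values (assuming TIME_UNIT is 1024 usec
 * and beacon_time_tsf_bits is 22): a beacon_interval of 100 TU gives an
 * interval of 102400 usec.  For usec == 250000 the quotient is
 * 250000 / 102400 == 2 beacons and the remainder is 45200 usec, so the
 * packed result is (2 << 22) + 45200.
 */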
1963
1964/* base is usually the value we get from the uCode with each received frame,
1965 * i.e. the HW timer counter counting down
1966 */
1967__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
1968 u32 addon, u32 beacon_interval)
1969{
1970 u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
1971 priv->hw_params.beacon_time_tsf_bits);
1972 u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
1973 priv->hw_params.beacon_time_tsf_bits);
1974 u32 interval = beacon_interval * TIME_UNIT;
1975 u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
1976 priv->hw_params.beacon_time_tsf_bits)) +
1977 (addon & iwl_legacy_beacon_time_mask_high(priv,
1978 priv->hw_params.beacon_time_tsf_bits));
1979
1980 if (base_low > addon_low)
1981 res += base_low - addon_low;
1982 else if (base_low < addon_low) {
1983 res += interval + base_low - addon_low;
1984 res += (1 << priv->hw_params.beacon_time_tsf_bits);
1985 } else
1986 res += (1 << priv->hw_params.beacon_time_tsf_bits);
1987
1988 return cpu_to_le32(res);
1989}
1990EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
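/*
 * Continuing the worked example above (same illustrative assumptions):
 * adding base == (2 << 22) + 45200 and addon == (1 << 22) + 60000 sums the
 * beacon counts to 3; because base_low (45200) < addon_low (60000), the low
 * part wraps to 102400 + 45200 - 60000 == 87600 usec and one more beacon is
 * added, giving (4 << 22) + 87600.
 */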
1991
1992#ifdef CONFIG_PM
1993
1994int iwl_legacy_pci_suspend(struct device *device)
1995{
1996 struct pci_dev *pdev = to_pci_dev(device);
1997 struct iwl_priv *priv = pci_get_drvdata(pdev);
1998
1999 /*
2000 * This function is called when the system goes into the suspend state.
2001 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
2002 * first, but since iwl_mac_stop() has no knowledge of who the caller is,
2003 * it will not call apm_ops.stop() to stop the DMA operation.
2004 * Call apm_ops.stop() here to make sure we stop the DMA.
2005 */
2006 iwl_legacy_apm_stop(priv);
2007
2008 return 0;
2009}
2010EXPORT_SYMBOL(iwl_legacy_pci_suspend);
2011
2012int iwl_legacy_pci_resume(struct device *device)
2013{
2014 struct pci_dev *pdev = to_pci_dev(device);
2015 struct iwl_priv *priv = pci_get_drvdata(pdev);
2016 bool hw_rfkill = false;
2017
2018 /*
2019 * We disable the RETRY_TIMEOUT register (0x41) to keep
2020 * PCI Tx retries from interfering with C3 CPU state.
2021 */
2022 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2023
2024 iwl_legacy_enable_interrupts(priv);
2025
2026 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2027 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2028 hw_rfkill = true;
2029
2030 if (hw_rfkill)
2031 set_bit(STATUS_RF_KILL_HW, &priv->status);
2032 else
2033 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2034
2035 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2036
2037 return 0;
2038}
2039EXPORT_SYMBOL(iwl_legacy_pci_resume);
2040
2041const struct dev_pm_ops iwl_legacy_pm_ops = {
2042 .suspend = iwl_legacy_pci_suspend,
2043 .resume = iwl_legacy_pci_resume,
2044 .freeze = iwl_legacy_pci_suspend,
2045 .thaw = iwl_legacy_pci_resume,
2046 .poweroff = iwl_legacy_pci_suspend,
2047 .restore = iwl_legacy_pci_resume,
2048};
2049EXPORT_SYMBOL(iwl_legacy_pm_ops);
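/*
 * Minimal usage sketch (hypothetical names): a chip driver built on this
 * core would point its pci_driver at these ops so the PCI core invokes the
 * suspend/resume hooks above, e.g.:
 *
 *	static struct pci_driver iwl_foo_driver = {
 *		.name		= "iwl_foo",
 *		.id_table	= iwl_foo_hw_card_ids,
 *		.probe		= iwl_foo_pci_probe,
 *		.remove		= __devexit_p(iwl_foo_pci_remove),
 *		.driver.pm	= &iwl_legacy_pm_ops,
 *	};
 */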
2050
2051#endif /* CONFIG_PM */
2052
2053static void
2054iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
2055{
2056 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2057 return;
2058
2059 if (!ctx->is_active)
2060 return;
2061
2062 ctx->qos_data.def_qos_parm.qos_flags = 0;
2063
2064 if (ctx->qos_data.qos_active)
2065 ctx->qos_data.def_qos_parm.qos_flags |=
2066 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2067
2068 if (ctx->ht.enabled)
2069 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
2070
2071 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2072 ctx->qos_data.qos_active,
2073 ctx->qos_data.def_qos_parm.qos_flags);
2074
2075 iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
2076 sizeof(struct iwl_qosparam_cmd),
2077 &ctx->qos_data.def_qos_parm, NULL);
2078}
2079
2080/**
2081 * iwl_legacy_mac_config - mac80211 config callback
2082 */
2083int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
2084{
2085 struct iwl_priv *priv = hw->priv;
2086 const struct iwl_channel_info *ch_info;
2087 struct ieee80211_conf *conf = &hw->conf;
2088 struct ieee80211_channel *channel = conf->channel;
2089 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2090 struct iwl_rxon_context *ctx;
2091 unsigned long flags = 0;
2092 int ret = 0;
2093 u16 ch;
2094 int scan_active = 0;
2095 bool ht_changed[NUM_IWL_RXON_CTX] = {};
2096
2097 if (WARN_ON(!priv->cfg->ops->legacy))
2098 return -EOPNOTSUPP;
2099
2100 mutex_lock(&priv->mutex);
2101
2102 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2103 channel->hw_value, changed);
2104
2105 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2106 test_bit(STATUS_SCANNING, &priv->status))) {
2107 scan_active = 1;
2108 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2109 }
2110
2111 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2112 IEEE80211_CONF_CHANGE_CHANNEL)) {
2113 /* mac80211 uses static SMPS for non-HT, which is what we want */
2114 priv->current_ht_config.smps = conf->smps_mode;
2115
2116 /*
2117 * Recalculate chain counts.
2118 *
2119 * If monitor mode is enabled then mac80211 will
2120 * set up the SM PS mode to OFF if an HT channel is
2121 * configured.
2122 */
2123 if (priv->cfg->ops->hcmd->set_rxon_chain)
2124 for_each_context(priv, ctx)
2125 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2126 }
2127
2128 /* during scanning, mac80211 will delay the channel setting until
2129 * the scan finishes, with changed = 0
2130 */
2131 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2132 if (scan_active)
2133 goto set_ch_out;
2134
2135 ch = channel->hw_value;
2136 ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
2137 if (!iwl_legacy_is_channel_valid(ch_info)) {
2138 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2139 ret = -EINVAL;
2140 goto set_ch_out;
2141 }
2142
2143 spin_lock_irqsave(&priv->lock, flags);
2144
2145 for_each_context(priv, ctx) {
2146 /* Configure HT40 channels */
2147 if (ctx->ht.enabled != conf_is_ht(conf)) {
2148 ctx->ht.enabled = conf_is_ht(conf);
2149 ht_changed[ctx->ctxid] = true;
2150 }
2151 if (ctx->ht.enabled) {
2152 if (conf_is_ht40_minus(conf)) {
2153 ctx->ht.extension_chan_offset =
2154 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2155 ctx->ht.is_40mhz = true;
2156 } else if (conf_is_ht40_plus(conf)) {
2157 ctx->ht.extension_chan_offset =
2158 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2159 ctx->ht.is_40mhz = true;
2160 } else {
2161 ctx->ht.extension_chan_offset =
2162 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2163 ctx->ht.is_40mhz = false;
2164 }
2165 } else
2166 ctx->ht.is_40mhz = false;
2167
2168 /*
2169 * Default to no protection. Protection mode will
2170 * later be set from BSS config in iwl_ht_conf
2171 */
2172 ctx->ht.protection =
2173 IEEE80211_HT_OP_MODE_PROTECTION_NONE;
2174
2175 /* if we are switching from ht to 2.4 clear flags
2176 * from any ht related info since 2.4 does not
2177 * support ht */
2178 if ((le16_to_cpu(ctx->staging.channel) != ch))
2179 ctx->staging.flags = 0;
2180
2181 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2182 iwl_legacy_set_rxon_ht(priv, ht_conf);
2183
2184 iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
2185 ctx->vif);
2186 }
2187
2188 spin_unlock_irqrestore(&priv->lock, flags);
2189
2190 if (priv->cfg->ops->legacy->update_bcast_stations)
2191 ret =
2192 priv->cfg->ops->legacy->update_bcast_stations(priv);
2193
2194 set_ch_out:
2195 /* The list of supported rates and rate mask can be different
2196 * for each band; since the band may have changed, reset
2197 * the rate mask to what mac80211 lists */
2198 iwl_legacy_set_rate(priv);
2199 }
2200
2201 if (changed & (IEEE80211_CONF_CHANGE_PS |
2202 IEEE80211_CONF_CHANGE_IDLE)) {
2203 ret = iwl_legacy_power_update_mode(priv, false);
2204 if (ret)
2205 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2206 }
2207
2208 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2209 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2210 priv->tx_power_user_lmt, conf->power_level);
2211
2212 iwl_legacy_set_tx_power(priv, conf->power_level, false);
2213 }
2214
2215 if (!iwl_legacy_is_ready(priv)) {
2216 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2217 goto out;
2218 }
2219
2220 if (scan_active)
2221 goto out;
2222
2223 for_each_context(priv, ctx) {
2224 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
2225 iwl_legacy_commit_rxon(priv, ctx);
2226 else
2227 IWL_DEBUG_INFO(priv,
2228 "Not re-sending same RXON configuration.\n");
2229 if (ht_changed[ctx->ctxid])
2230 iwl_legacy_update_qos(priv, ctx);
2231 }
2232
2233out:
2234 IWL_DEBUG_MAC80211(priv, "leave\n");
2235 mutex_unlock(&priv->mutex);
2236 return ret;
2237}
2238EXPORT_SYMBOL(iwl_legacy_mac_config);
2239
2240void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
2241{
2242 struct iwl_priv *priv = hw->priv;
2243 unsigned long flags;
2244 /* IBSS can only be the IWL_RXON_CTX_BSS context */
2245 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2246
2247 if (WARN_ON(!priv->cfg->ops->legacy))
2248 return;
2249
2250 mutex_lock(&priv->mutex);
2251 IWL_DEBUG_MAC80211(priv, "enter\n");
2252
2253 spin_lock_irqsave(&priv->lock, flags);
2254 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2255 spin_unlock_irqrestore(&priv->lock, flags);
2256
2257 spin_lock_irqsave(&priv->lock, flags);
2258
2259 /* on a new association, get rid of the old IBSS beacon skb */
2260 if (priv->beacon_skb)
2261 dev_kfree_skb(priv->beacon_skb);
2262
2263 priv->beacon_skb = NULL;
2264
2265 priv->timestamp = 0;
2266
2267 spin_unlock_irqrestore(&priv->lock, flags);
2268
2269 iwl_legacy_scan_cancel_timeout(priv, 100);
2270 if (!iwl_legacy_is_ready_rf(priv)) {
2271 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2272 mutex_unlock(&priv->mutex);
2273 return;
2274 }
2275
2276 /* we are restarting the association process;
2277 * clear the RXON_FILTER_ASSOC_MSK bit
2278 */
2279 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2280 iwl_legacy_commit_rxon(priv, ctx);
2281
2282 iwl_legacy_set_rate(priv);
2283
2284 mutex_unlock(&priv->mutex);
2285
2286 IWL_DEBUG_MAC80211(priv, "leave\n");
2287}
2288EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
2289
2290static void iwl_legacy_ht_conf(struct iwl_priv *priv,
2291 struct ieee80211_vif *vif)
2292{
2293 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2294 struct ieee80211_sta *sta;
2295 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
2296 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2297
2298 IWL_DEBUG_ASSOC(priv, "enter:\n");
2299
2300 if (!ctx->ht.enabled)
2301 return;
2302
2303 ctx->ht.protection =
2304 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2305 ctx->ht.non_gf_sta_present =
2306 !!(bss_conf->ht_operation_mode &
2307 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2308
2309 ht_conf->single_chain_sufficient = false;
2310
2311 switch (vif->type) {
2312 case NL80211_IFTYPE_STATION:
2313 rcu_read_lock();
2314 sta = ieee80211_find_sta(vif, bss_conf->bssid);
2315 if (sta) {
2316 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2317 int maxstreams;
2318
2319 maxstreams = (ht_cap->mcs.tx_params &
2320 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2321 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2322 maxstreams += 1;
2323
2324 if ((ht_cap->mcs.rx_mask[1] == 0) &&
2325 (ht_cap->mcs.rx_mask[2] == 0))
2326 ht_conf->single_chain_sufficient = true;
2327 if (maxstreams <= 1)
2328 ht_conf->single_chain_sufficient = true;
2329 } else {
2330 /*
2331 * If this happens at all, it can only be through a race
2332 * when the AP disconnects us while we're still
2333 * setting up the connection; in that case mac80211
2334 * will soon tell us about it.
2335 */
2336 ht_conf->single_chain_sufficient = true;
2337 }
2338 rcu_read_unlock();
2339 break;
2340 case NL80211_IFTYPE_ADHOC:
2341 ht_conf->single_chain_sufficient = true;
2342 break;
2343 default:
2344 break;
2345 }
2346
2347 IWL_DEBUG_ASSOC(priv, "leave\n");
2348}
2349
2350static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
2351 struct ieee80211_vif *vif)
2352{
2353 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2354
2355 /*
2356 * inform the ucode that there is no longer an
2357 * association and that no more packets should be
2358 * sent
2359 */
2360 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2361 ctx->staging.assoc_id = 0;
2362 iwl_legacy_commit_rxon(priv, ctx);
2363}
2364
2365static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
2366 struct ieee80211_vif *vif)
2367{
2368 struct iwl_priv *priv = hw->priv;
2369 unsigned long flags;
2370 __le64 timestamp;
2371 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
2372
2373 if (!skb)
2374 return;
2375
2376 IWL_DEBUG_MAC80211(priv, "enter\n");
2377
2378 lockdep_assert_held(&priv->mutex);
2379
2380 if (!priv->beacon_ctx) {
2381 IWL_ERR(priv, "update beacon but no beacon context!\n");
2382 dev_kfree_skb(skb);
2383 return;
2384 }
2385
2386 spin_lock_irqsave(&priv->lock, flags);
2387
2388 if (priv->beacon_skb)
2389 dev_kfree_skb(priv->beacon_skb);
2390
2391 priv->beacon_skb = skb;
2392
2393 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2394 priv->timestamp = le64_to_cpu(timestamp);
2395
2396 IWL_DEBUG_MAC80211(priv, "leave\n");
2397 spin_unlock_irqrestore(&priv->lock, flags);
2398
2399 if (!iwl_legacy_is_ready_rf(priv)) {
2400 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2401 return;
2402 }
2403
2404 priv->cfg->ops->legacy->post_associate(priv);
2405}
2406
2407void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
2408 struct ieee80211_vif *vif,
2409 struct ieee80211_bss_conf *bss_conf,
2410 u32 changes)
2411{
2412 struct iwl_priv *priv = hw->priv;
2413 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2414 int ret;
2415
2416 if (WARN_ON(!priv->cfg->ops->legacy))
2417 return;
2418
2419 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
2420
2421 if (!iwl_legacy_is_alive(priv))
2422 return;
2423
2424 mutex_lock(&priv->mutex);
2425
2426 if (changes & BSS_CHANGED_QOS) {
2427 unsigned long flags;
2428
2429 spin_lock_irqsave(&priv->lock, flags);
2430 ctx->qos_data.qos_active = bss_conf->qos;
2431 iwl_legacy_update_qos(priv, ctx);
2432 spin_unlock_irqrestore(&priv->lock, flags);
2433 }
2434
2435 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2436 /*
2437 * the add_interface code must make sure we only ever
2438 * have a single interface that could be beaconing at
2439 * any time.
2440 */
2441 if (vif->bss_conf.enable_beacon)
2442 priv->beacon_ctx = ctx;
2443 else
2444 priv->beacon_ctx = NULL;
2445 }
2446
2447 if (changes & BSS_CHANGED_BSSID) {
2448 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
2449
2450 /*
2451 * If there is currently a HW scan going on in the
2452 * background then we need to cancel it else the RXON
2453 * below/in post_associate will fail.
2454 */
2455 if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
2456 IWL_WARN(priv,
2457 "Aborted scan still in progress after 100ms\n");
2458 IWL_DEBUG_MAC80211(priv,
2459 "leaving - scan abort failed.\n");
2460 mutex_unlock(&priv->mutex);
2461 return;
2462 }
2463
2464 /* mac80211 only sets assoc when in STATION mode */
2465 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
2466 memcpy(ctx->staging.bssid_addr,
2467 bss_conf->bssid, ETH_ALEN);
2468
2469 /* currently needed in a few places */
2470 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2471 } else {
2472 ctx->staging.filter_flags &=
2473 ~RXON_FILTER_ASSOC_MSK;
2474 }
2475
2476 }
2477
2478 /*
2479 * This needs to be after setting the BSSID in case
2480 * mac80211 decides to do both changes at once because
2481 * it will invoke post_associate.
2482 */
2483 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
2484 iwl_legacy_beacon_update(hw, vif);
2485
2486 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
2487 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
2488 bss_conf->use_short_preamble);
2489 if (bss_conf->use_short_preamble)
2490 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2491 else
2492 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2493 }
2494
2495 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
2496 IWL_DEBUG_MAC80211(priv,
2497 "ERP_CTS %d\n", bss_conf->use_cts_prot);
2498 if (bss_conf->use_cts_prot &&
2499 (priv->band != IEEE80211_BAND_5GHZ))
2500 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
2501 else
2502 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
2503 if (bss_conf->use_cts_prot)
2504 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
2505 else
2506 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
2507 }
2508
2509 if (changes & BSS_CHANGED_BASIC_RATES) {
2510 /* XXX use this information
2511 *
2512 * To do that, remove code from iwl_legacy_set_rate() and put something
2513 * like this here:
2514 *
2515 if (A-band)
2516 ctx->staging.ofdm_basic_rates =
2517 bss_conf->basic_rates;
2518 else
2519 ctx->staging.ofdm_basic_rates =
2520 bss_conf->basic_rates >> 4;
2521 ctx->staging.cck_basic_rates =
2522 bss_conf->basic_rates & 0xF;
2523 */
2524 }
2525
2526 if (changes & BSS_CHANGED_HT) {
2527 iwl_legacy_ht_conf(priv, vif);
2528
2529 if (priv->cfg->ops->hcmd->set_rxon_chain)
2530 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2531 }
2532
2533 if (changes & BSS_CHANGED_ASSOC) {
2534 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
2535 if (bss_conf->assoc) {
2536 priv->timestamp = bss_conf->timestamp;
2537
2538 if (!iwl_legacy_is_rfkill(priv))
2539 priv->cfg->ops->legacy->post_associate(priv);
2540 } else
2541 iwl_legacy_set_no_assoc(priv, vif);
2542 }
2543
2544 if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
2545 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
2546 changes);
2547 ret = iwl_legacy_send_rxon_assoc(priv, ctx);
2548 if (!ret) {
2549 /* Sync active_rxon with latest change. */
2550 memcpy((void *)&ctx->active,
2551 &ctx->staging,
2552 sizeof(struct iwl_legacy_rxon_cmd));
2553 }
2554 }
2555
2556 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2557 if (vif->bss_conf.enable_beacon) {
2558 memcpy(ctx->staging.bssid_addr,
2559 bss_conf->bssid, ETH_ALEN);
2560 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2561 priv->cfg->ops->legacy->config_ap(priv);
2562 } else
2563 iwl_legacy_set_no_assoc(priv, vif);
2564 }
2565
2566 if (changes & BSS_CHANGED_IBSS) {
2567 ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
2568 bss_conf->ibss_joined);
2569 if (ret)
2570 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
2571 bss_conf->ibss_joined ? "add" : "remove",
2572 bss_conf->bssid);
2573 }
2574
2575 mutex_unlock(&priv->mutex);
2576
2577 IWL_DEBUG_MAC80211(priv, "leave\n");
2578}
2579EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
2580
2581irqreturn_t iwl_legacy_isr(int irq, void *data)
2582{
2583 struct iwl_priv *priv = data;
2584 u32 inta, inta_mask;
2585 u32 inta_fh;
2586 unsigned long flags;
2587 if (!priv)
2588 return IRQ_NONE;
2589
2590 spin_lock_irqsave(&priv->lock, flags);
2591
2592 /* Disable (but don't clear!) interrupts here to avoid
2593 * back-to-back ISRs and sporadic interrupts from our NIC.
2594 * If we have something to service, the tasklet will re-enable ints.
2595 * If we *don't* have something, we'll re-enable before leaving here. */
2596 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
2597 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
2598
2599 /* Discover which interrupts are active/pending */
2600 inta = iwl_read32(priv, CSR_INT);
2601 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
2602
2603 /* Ignore interrupt if there's nothing in NIC to service.
2604 * This may be due to IRQ shared with another device,
2605 * or due to sporadic interrupts thrown from our NIC. */
2606 if (!inta && !inta_fh) {
2607 IWL_DEBUG_ISR(priv,
2608 "Ignore interrupt, inta == 0, inta_fh == 0\n");
2609 goto none;
2610 }
2611
2612 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
2613 /* Hardware disappeared. It might have already raised
2614 * an interrupt */
2615 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
2616 goto unplugged;
2617 }
2618
2619 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
2620 inta, inta_mask, inta_fh);
2621
2622 inta &= ~CSR_INT_BIT_SCD;
2623
2624 /* iwl_irq_tasklet() will service interrupts and re-enable them */
2625 if (likely(inta || inta_fh))
2626 tasklet_schedule(&priv->irq_tasklet);
2627
2628unplugged:
2629 spin_unlock_irqrestore(&priv->lock, flags);
2630 return IRQ_HANDLED;
2631
2632none:
2633 /* re-enable interrupts here since we don't have anything to service. */
2634	/* only re-enable if disabled by irq */
2635 if (test_bit(STATUS_INT_ENABLED, &priv->status))
2636 iwl_legacy_enable_interrupts(priv);
2637 spin_unlock_irqrestore(&priv->lock, flags);
2638 return IRQ_NONE;
2639}
2640EXPORT_SYMBOL(iwl_legacy_isr);
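For context, a minimal sketch of how this handler would typically be hooked up during probe as a shared PCI interrupt (the name string and error handling here are illustrative, not part of this patch); IRQF_SHARED matters because the handler above explicitly tolerates interrupts that belong to another device on the same line:

	/* illustrative: register the ISR as a shared PCI interrupt */
	int err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
			      IRQF_SHARED, "iwl-legacy", priv);
	if (err)
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);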
2641
2642/*
2643 * iwl_legacy_tx_cmd_protection: Set rts/cts. This function is shared by
2644 * 3945 and 4965 only.
2645 */
2646void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
2647 struct ieee80211_tx_info *info,
2648 __le16 fc, __le32 *tx_flags)
2649{
2650 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2651 *tx_flags |= TX_CMD_FLG_RTS_MSK;
2652 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2653 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2654
2655 if (!ieee80211_is_mgmt(fc))
2656 return;
2657
2658 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2659 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2660 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2661 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2662 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2663 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2664 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2665 break;
2666 }
2667 } else if (info->control.rates[0].flags &
2668 IEEE80211_TX_RC_USE_CTS_PROTECT) {
2669 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2670 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2671 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2672 }
2673}
2674EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
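For reference, a hedged sketch of how a per-device TX path would call this helper while building a TX command; tx_cmd, info and fc stand for variables of that (hypothetical) caller and are not taken from this hunk:

	__le32 tx_flags = tx_cmd->tx_flags;	/* tx_cmd: the TX host command being built */

	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
	tx_cmd->tx_flags = tx_flags;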
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
new file mode 100644
index 000000000000..f03b463e4378
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -0,0 +1,646 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_core_h__
64#define __iwl_legacy_core_h__
65
66/************************
67 * forward declarations *
68 ************************/
69struct iwl_host_cmd;
70struct iwl_cmd;
71
72
73#define IWLWIFI_VERSION "in-tree:"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>"
76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg)
81
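The IWL_PCI_DEVICE() helper above is intended for pci_device_id tables; a sketch of its use (the device ID and cfg symbol are placeholders for illustration only):

	static const struct pci_device_id example_hw_card_ids[] = {
		{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},	/* placeholder entry */
		{0}
	};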
82#define TIME_UNIT 1024
83
84#define IWL_SKU_G 0x1
85#define IWL_SKU_A 0x2
86#define IWL_SKU_N 0x8
87
88#define IWL_CMD(x) case x: return #x
89
90struct iwl_hcmd_ops {
91 int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
92 int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
93 void (*set_rxon_chain)(struct iwl_priv *priv,
94 struct iwl_rxon_context *ctx);
95};
96
97struct iwl_hcmd_utils_ops {
98 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
99 u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
100 u8 *data);
101 int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
102 void (*post_scan)(struct iwl_priv *priv);
103};
104
105struct iwl_apm_ops {
106 int (*init)(struct iwl_priv *priv);
107 void (*config)(struct iwl_priv *priv);
108};
109
110struct iwl_debugfs_ops {
111 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
112 size_t count, loff_t *ppos);
113 ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
114 size_t count, loff_t *ppos);
115 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
116 size_t count, loff_t *ppos);
117};
118
119struct iwl_temp_ops {
120 void (*temperature)(struct iwl_priv *priv);
121};
122
123struct iwl_lib_ops {
124 /* set hw dependent parameters */
125 int (*set_hw_params)(struct iwl_priv *priv);
126 /* Handling TX */
127 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
128 struct iwl_tx_queue *txq,
129 u16 byte_cnt);
130 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
131 struct iwl_tx_queue *txq,
132 dma_addr_t addr,
133 u16 len, u8 reset, u8 pad);
134 void (*txq_free_tfd)(struct iwl_priv *priv,
135 struct iwl_tx_queue *txq);
136 int (*txq_init)(struct iwl_priv *priv,
137 struct iwl_tx_queue *txq);
138 /* setup Rx handler */
139 void (*rx_handler_setup)(struct iwl_priv *priv);
140 /* alive notification after init uCode load */
141 void (*init_alive_start)(struct iwl_priv *priv);
142 /* check validity of rtc data address */
143 int (*is_valid_rtc_data_addr)(u32 addr);
144 /* 1st ucode load */
145 int (*load_ucode)(struct iwl_priv *priv);
146 int (*dump_nic_event_log)(struct iwl_priv *priv,
147 bool full_log, char **buf, bool display);
148 void (*dump_nic_error_log)(struct iwl_priv *priv);
149 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
150 int (*set_channel_switch)(struct iwl_priv *priv,
151 struct ieee80211_channel_switch *ch_switch);
152 /* power management */
153 struct iwl_apm_ops apm_ops;
154
155 /* power */
156 int (*send_tx_power) (struct iwl_priv *priv);
157 void (*update_chain_flags)(struct iwl_priv *priv);
158
159 /* eeprom operations (as defined in iwl-eeprom.h) */
160 struct iwl_eeprom_ops eeprom_ops;
161
162 /* temperature */
163 struct iwl_temp_ops temp_ops;
164 /* check for plcp health */
165 bool (*check_plcp_health)(struct iwl_priv *priv,
166 struct iwl_rx_packet *pkt);
167
168 struct iwl_debugfs_ops debugfs_ops;
169
170};
171
172struct iwl_led_ops {
173 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
174};
175
176struct iwl_legacy_ops {
177 void (*post_associate)(struct iwl_priv *priv);
178 void (*config_ap)(struct iwl_priv *priv);
179 /* station management */
180 int (*update_bcast_stations)(struct iwl_priv *priv);
181 int (*manage_ibss_station)(struct iwl_priv *priv,
182 struct ieee80211_vif *vif, bool add);
183};
184
185struct iwl_ops {
186 const struct iwl_lib_ops *lib;
187 const struct iwl_hcmd_ops *hcmd;
188 const struct iwl_hcmd_utils_ops *utils;
189 const struct iwl_led_ops *led;
190 const struct iwl_nic_ops *nic;
191 const struct iwl_legacy_ops *legacy;
192 const struct ieee80211_ops *ieee80211_ops;
193};
194
195struct iwl_mod_params {
196 int sw_crypto; /* def: 0 = using hardware encryption */
197 int disable_hw_scan; /* def: 0 = use h/w scan */
198 int num_of_queues; /* def: HW dependent */
199 int disable_11n; /* def: 0 = 11n capabilities enabled */
200 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
201 int antenna; /* def: 0 = both antennas (use diversity) */
202 int restart_fw; /* def: 1 = restart firmware */
203};
204
205/*
 206 * @led_compensation: compensate the led on/off time per HW according
 207 * to the deviation, to achieve the desired led frequency.
 208 * The detailed algorithm is described in iwl-led.c
209 * @chain_noise_num_beacons: number of beacons used to compute chain noise
 210 * @plcp_delta_threshold: plcp error rate threshold used to trigger
 211 * radio tuning when the received plcp error rate is high
212 * @wd_timeout: TX queues watchdog timeout
 213 * @temperature_kelvin: temperature reported by uCode in kelvin
 214 * @max_event_log_size: size of the event log buffer for ucode event logging
215 * @ucode_tracing: support ucode continuous tracing
216 * @sensitivity_calib_by_driver: driver has the capability to perform
217 * sensitivity calibration operation
218 * @chain_noise_calib_by_driver: driver has the capability to perform
219 * chain noise calibration operation
220 */
221struct iwl_base_params {
222 int eeprom_size;
223 int num_of_queues; /* def: HW dependent */
224 int num_of_ampdu_queues;/* def: HW dependent */
225 /* for iwl_legacy_apm_init() */
226 u32 pll_cfg_val;
227 bool set_l0s;
228 bool use_bsm;
229
230 u16 led_compensation;
231 int chain_noise_num_beacons;
232 u8 plcp_delta_threshold;
233 unsigned int wd_timeout;
234 bool temperature_kelvin;
235 u32 max_event_log_size;
236 const bool ucode_tracing;
237 const bool sensitivity_calib_by_driver;
238 const bool chain_noise_calib_by_driver;
239};
240
241/**
242 * struct iwl_cfg
243 * @fw_name_pre: Firmware filename prefix. The api version and extension
 244 * (.ucode) will be added to the filename before loading from disk. The
245 * filename is constructed as fw_name_pre<api>.ucode.
246 * @ucode_api_max: Highest version of uCode API supported by driver.
247 * @ucode_api_min: Lowest version of uCode API supported by driver.
 248 * @scan_rx_antennas, @scan_tx_antennas: available antennas for scan operation
249 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
250 *
 251 * We enable the driver to be backward compatible with respect to the API
 252 * version. The driver specifies which APIs it supports (@ucode_api_max being
 253 * the highest and @ucode_api_min the lowest). Firmware will only be loaded if
254 * it has a supported API version. The firmware's API version will be
255 * stored in @iwl_priv, enabling the driver to make runtime changes based
256 * on firmware version used.
257 *
258 * For example,
259 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
260 * Driver interacts with Firmware API version >= 2.
261 * } else {
262 * Driver interacts with Firmware API version 1.
263 * }
264 *
265 * The ideal usage of this infrastructure is to treat a new ucode API
 266 * release as a new hardware revision. That is, by using the
 267 * iwl_hcmd_utils_ops etc., we accommodate different command structures
268 * and flows between hardware versions as well as their API
269 * versions.
270 *
271 */
272struct iwl_cfg {
273 /* params specific to an individual device within a device family */
274 const char *name;
275 const char *fw_name_pre;
276 const unsigned int ucode_api_max;
277 const unsigned int ucode_api_min;
278 u8 valid_tx_ant;
279 u8 valid_rx_ant;
280 unsigned int sku;
281 u16 eeprom_ver;
282 u16 eeprom_calib_ver;
283 const struct iwl_ops *ops;
284 /* module based parameters which can be set from modprobe cmd */
285 const struct iwl_mod_params *mod_params;
286 /* params not likely to change within a device family */
287 struct iwl_base_params *base_params;
288 /* params likely to change within a device family */
289 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
290 u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
291 enum iwl_led_mode led_mode;
292};
293
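As a worked example of the @fw_name_pre rule documented above, firmware name construction boils down to something like the following sketch (buffer size and format string are illustrative, not lifted from this patch):

	char fw_name[32];

	/* e.g. "iwlwifi-4965-" + 2 + ".ucode" -> "iwlwifi-4965-2.ucode" */
	snprintf(fw_name, sizeof(fw_name), "%s%u%s",
		 priv->cfg->fw_name_pre, priv->cfg->ucode_api_max, ".ucode");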
294/***************************
295 * L i b *
296 ***************************/
297
298struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
299int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
300 const struct ieee80211_tx_queue_params *params);
301int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
302void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
303 struct iwl_rxon_context *ctx,
304 int hw_decrypt);
305int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
306 struct iwl_rxon_context *ctx);
307int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
308 struct iwl_rxon_context *ctx);
309int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
310 struct ieee80211_channel *ch,
311 struct iwl_rxon_context *ctx);
312void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
313 struct iwl_rxon_context *ctx,
314 enum ieee80211_band band,
315 struct ieee80211_vif *vif);
316u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
317 enum ieee80211_band band);
318void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
319 struct iwl_ht_config *ht_conf);
320bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
321 struct iwl_rxon_context *ctx,
322 struct ieee80211_sta_ht_cap *ht_cap);
323void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
324 struct iwl_rxon_context *ctx);
325void iwl_legacy_set_rate(struct iwl_priv *priv);
326int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
327 struct ieee80211_hdr *hdr,
328 u32 decrypt_res,
329 struct ieee80211_rx_status *stats);
330void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
331int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
332 struct ieee80211_vif *vif);
333void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
334 struct ieee80211_vif *vif);
335int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
336 struct ieee80211_vif *vif,
337 enum nl80211_iftype newtype, bool newp2p);
338int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
339void iwl_legacy_txq_mem(struct iwl_priv *priv);
340
341#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
342int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
343void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
344void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
345void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
346 u16 length, struct ieee80211_hdr *header);
347void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
348 u16 length, struct ieee80211_hdr *header);
349const char *iwl_legacy_get_mgmt_string(int cmd);
350const char *iwl_legacy_get_ctrl_string(int cmd);
351void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
352void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
353 u16 len);
354#else
355static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
356{
357 return 0;
358}
359static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
360{
361}
362static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
363{
364}
365static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
366 u16 length, struct ieee80211_hdr *header)
367{
368}
369static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
370 u16 length, struct ieee80211_hdr *header)
371{
372}
373static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
374 __le16 fc, u16 len)
375{
376}
377#endif
378/*****************************************************
379 * RX handlers.
380 * **************************************************/
381void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
382 struct iwl_rx_mem_buffer *rxb);
383void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
384 struct iwl_rx_mem_buffer *rxb);
385void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
386 struct iwl_rx_mem_buffer *rxb);
387
388/*****************************************************
389* RX
390******************************************************/
391void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
392void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
393int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
394void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
395 struct iwl_rx_queue *q);
396int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
397void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
398 struct iwl_rx_mem_buffer *rxb);
399/* Handlers */
400void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
401 struct iwl_rx_mem_buffer *rxb);
402void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
403 struct iwl_rx_packet *pkt);
404void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
405void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
406
407/* TX helpers */
408
409/*****************************************************
410* TX
411******************************************************/
412void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
413 struct iwl_tx_queue *txq);
414int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
415 int slots_num, u32 txq_id);
416void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
417 struct iwl_tx_queue *txq,
418 int slots_num, u32 txq_id);
419void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
420void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
421void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
422/*****************************************************
423 * TX power
424 ****************************************************/
425int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
426
427/*******************************************************************************
428 * Rate
429 ******************************************************************************/
430
431u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
432 struct iwl_rxon_context *ctx);
433
434/*******************************************************************************
435 * Scanning
436 ******************************************************************************/
437void iwl_legacy_init_scan_params(struct iwl_priv *priv);
438int iwl_legacy_scan_cancel(struct iwl_priv *priv);
439int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
440void iwl_legacy_force_scan_end(struct iwl_priv *priv);
441int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
442 struct ieee80211_vif *vif,
443 struct cfg80211_scan_request *req);
444void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
445int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external);
446u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
447 struct ieee80211_mgmt *frame,
448 const u8 *ta, const u8 *ie, int ie_len, int left);
449void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
450u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
451 enum ieee80211_band band,
452 u8 n_probes);
453u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
454 enum ieee80211_band band,
455 struct ieee80211_vif *vif);
456void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
457void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
458
459/* For faster active scanning, scan will move to the next channel if fewer than
460 * PLCP_QUIET_THRESH packets are heard on this channel within
461 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
462 * time if it's a quiet channel (nothing responded to our probe, and there's
463 * no other traffic).
464 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
465#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
466#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
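A sketch of how a request_scan implementation would apply these thresholds when filling the scan host command; the quiet_time/quiet_plcp_th field names are assumed from the scan command layout in iwl-commands.h and are shown only for illustration:

	/* shorten dwell on quiet channels, per the comment above */
	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;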
467
468#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
469
470/*****************************************************
471 * S e n d i n g H o s t C o m m a n d s *
472 *****************************************************/
473
474const char *iwl_legacy_get_cmd_string(u8 cmd);
475int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
476 struct iwl_host_cmd *cmd);
477int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
478int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
479 u16 len, const void *data);
480int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
481 const void *data,
482 void (*callback)(struct iwl_priv *priv,
483 struct iwl_device_cmd *cmd,
484 struct iwl_rx_packet *pkt));
485
486int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
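A brief usage sketch for the synchronous PDU helper; the command ID and payload shown (REPLY_BT_CONFIG, struct iwl_bt_cmd and its defaults) are assumed from iwl-commands.h and serve only as an illustration:

	struct iwl_bt_cmd bt_cmd = {
		.flags = BT_COEX_DISABLE,	/* illustrative payload */
		.lead_time = BT_LEAD_TIME_DEF,
		.max_kill = BT_MAX_KILL_DEF,
	};

	if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
				    sizeof(struct iwl_bt_cmd), &bt_cmd))
		IWL_ERR(priv, "failed to send BT Coex Config\n");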
487
488
489/*****************************************************
490 * PCI *
491 *****************************************************/
492
493static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
494{
495 int pos;
496 u16 pci_lnk_ctl;
497 pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
498 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
499 return pci_lnk_ctl;
500}
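The returned Link Control word can then be tested against the standard PCIe ASPM enable bits; a sketch with the masks written out rather than relying on any particular header:

	u16 lctl = iwl_legacy_pcie_link_ctl(priv);
	bool l0s_enabled = lctl & 0x01;	/* PCIe LNKCTL bit 0: ASPM L0s enable */
	bool l1_enabled  = lctl & 0x02;	/* PCIe LNKCTL bit 1: ASPM L1 enable */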
501
502void iwl_legacy_bg_watchdog(unsigned long data);
503u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
504 u32 usec, u32 beacon_interval);
505__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
506 u32 addon, u32 beacon_interval);
507
508#ifdef CONFIG_PM
509int iwl_legacy_pci_suspend(struct device *device);
510int iwl_legacy_pci_resume(struct device *device);
511extern const struct dev_pm_ops iwl_legacy_pm_ops;
512
513#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
514
515#else /* !CONFIG_PM */
516
517#define IWL_LEGACY_PM_OPS NULL
518
519#endif /* !CONFIG_PM */
520
521/*****************************************************
522* Error Handling Debugging
523******************************************************/
524void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
525int iwl4965_dump_nic_event_log(struct iwl_priv *priv,
526 bool full_log, char **buf, bool display);
527#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
528void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
529 struct iwl_rxon_context *ctx);
530#else
531static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
532 struct iwl_rxon_context *ctx)
533{
534}
535#endif
536
537void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
538
539/*****************************************************
540* GEOS
541******************************************************/
542int iwl_legacy_init_geos(struct iwl_priv *priv);
543void iwl_legacy_free_geos(struct iwl_priv *priv);
544
545/*************** DRIVER STATUS FUNCTIONS *****/
546
547#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
548/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
549#define STATUS_INT_ENABLED 2
550#define STATUS_RF_KILL_HW 3
551#define STATUS_CT_KILL 4
552#define STATUS_INIT 5
553#define STATUS_ALIVE 6
554#define STATUS_READY 7
555#define STATUS_TEMPERATURE 8
556#define STATUS_GEO_CONFIGURED 9
557#define STATUS_EXIT_PENDING 10
558#define STATUS_STATISTICS 12
559#define STATUS_SCANNING 13
560#define STATUS_SCAN_ABORTING 14
561#define STATUS_SCAN_HW 15
562#define STATUS_POWER_PMI 16
563#define STATUS_FW_ERROR 17
564
565
566static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
567{
568 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
569 * set but EXIT_PENDING is not */
570 return test_bit(STATUS_READY, &priv->status) &&
571 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
572 !test_bit(STATUS_EXIT_PENDING, &priv->status);
573}
574
575static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
576{
577 return test_bit(STATUS_ALIVE, &priv->status);
578}
579
580static inline int iwl_legacy_is_init(struct iwl_priv *priv)
581{
582 return test_bit(STATUS_INIT, &priv->status);
583}
584
585static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
586{
587 return test_bit(STATUS_RF_KILL_HW, &priv->status);
588}
589
590static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
591{
592 return iwl_legacy_is_rfkill_hw(priv);
593}
594
595static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
596{
597 return test_bit(STATUS_CT_KILL, &priv->status);
598}
599
600static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
601{
602
603 if (iwl_legacy_is_rfkill(priv))
604 return 0;
605
606 return iwl_legacy_is_ready(priv);
607}
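Callers typically gate hardware work on these status helpers; for example (return value illustrative):

	if (!iwl_legacy_is_ready_rf(priv))
		return -EIO;	/* radio is off or the device is not initialized yet */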
608
609extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
610extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
611 u8 flags, bool clear);
612void iwl_legacy_apm_stop(struct iwl_priv *priv);
613int iwl_legacy_apm_init(struct iwl_priv *priv);
614
615int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
616 struct iwl_rxon_context *ctx);
617static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
618 struct iwl_rxon_context *ctx)
619{
620 return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
621}
622static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
623 struct iwl_rxon_context *ctx)
624{
625 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
626}
627static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
628 struct iwl_priv *priv, enum ieee80211_band band)
629{
630 return priv->hw->wiphy->bands[band];
631}
632
633/* mac80211 handlers */
634int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
635void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
636void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
637 struct ieee80211_vif *vif,
638 struct ieee80211_bss_conf *bss_conf,
639 u32 changes);
640void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
641 struct ieee80211_tx_info *info,
642 __le16 fc, __le32 *tx_flags);
643
644irqreturn_t iwl_legacy_isr(int irq, void *data);
645
646#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h
new file mode 100644
index 000000000000..668a9616c269
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-csr.h
@@ -0,0 +1,422 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_csr_h__
64#define __iwl_legacy_csr_h__
65/*
66 * CSR (control and status registers)
67 *
68 * CSR registers are mapped directly into PCI bus space, and are accessible
69 * whenever platform supplies power to device, even when device is in
70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 *
73 * Use iwl_write32() and iwl_read32() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use iwl_legacy_write_direct32() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers.
79 *
 80 * NOTE: The device does need to be awake in order to read EEPROM
 81 * contents via the CSR_EEPROM register
82 */
83#define CSR_BASE (0x000)
84
85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
 89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
 91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
92#define CSR_GP_CNTRL (CSR_BASE+0x024)
93
94/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
96
97/*
98 * Hardware revision info
99 * Bit fields:
100 * 31-8: Reserved
101 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
103 * 1-0: "Dash" (-) value, as in A-1, etc.
104 *
105 * NOTE: Revision step affects calculation of CCK txpower for 4965.
106 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
107 */
108#define CSR_HW_REV (CSR_BASE+0x028)
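Decoding the fields documented above is a matter of masking the register value; a short sketch (variable names illustrative):

	u32 hw_rev = iwl_read32(priv, CSR_HW_REV);
	u8 step = (hw_rev >> 2) & 0x3;	/* 0 = A, 1 = B, 2 = C, 3 = D */
	u8 dash = hw_rev & 0x3;		/* the "1" in A-1, etc. */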
109
110/*
111 * EEPROM memory reads
112 *
113 * NOTE: Device must be awake, initialized via apm_ops.init(),
114 * in order to read.
115 */
116#define CSR_EEPROM_REG (CSR_BASE+0x02c)
117#define CSR_EEPROM_GP (CSR_BASE+0x030)
118
119#define CSR_GIO_REG (CSR_BASE+0x03C)
120#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
121#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
122
123/*
124 * UCODE-DRIVER GP (general purpose) mailbox registers.
125 * SET/CLR registers set/clear bit(s) if "1" is written.
126 */
127#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
128#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
129#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
130#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
131
132#define CSR_LED_REG (CSR_BASE+0x094)
133#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
134
135/* GIO Chicken Bits (PCI Express bus link power management) */
136#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
137
138/* Analog phase-lock-loop configuration */
139#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
140
141/*
142 * CSR Hardware Revision Workaround Register. Indicates hardware rev;
143 * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
144 * See also CSR_HW_REV register.
145 * Bit fields:
146 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
147 * 1-0: "Dash" (-) value, as in C-1, etc.
148 */
149#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
150
151#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
152#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
153
154/* Bits for CSR_HW_IF_CONFIG_REG */
155#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
156#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
157#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
158#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
159
160#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
161#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
162#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
163#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
164#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
165#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
166
167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
172
173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
175
176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
177 * acknowledged (reset) by host writing "1" to flagged bits. */
178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
189
190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
191 CSR_INT_BIT_HW_ERR | \
192 CSR_INT_BIT_FH_TX | \
193 CSR_INT_BIT_SW_ERR | \
194 CSR_INT_BIT_RF_KILL | \
195 CSR_INT_BIT_SW_RX | \
196 CSR_INT_BIT_WAKEUP | \
197 CSR_INT_BIT_ALIVE)
198
199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
208
209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
210 CSR39_FH_INT_BIT_RX_CHNL2 | \
211 CSR_FH_INT_BIT_RX_CHNL1 | \
212 CSR_FH_INT_BIT_RX_CHNL0)
213
214
215#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
216 CSR_FH_INT_BIT_TX_CHNL1 | \
217 CSR_FH_INT_BIT_TX_CHNL0)
218
219#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
220 CSR_FH_INT_BIT_RX_CHNL1 | \
221 CSR_FH_INT_BIT_RX_CHNL0)
222
223#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
224 CSR_FH_INT_BIT_TX_CHNL0)
225
226/* GPIO */
227#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
228#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
229#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
230
231/* RESET */
232#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
233#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
234#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
235#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
236#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
237#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
238
239/*
240 * GP (general purpose) CONTROL REGISTER
241 * Bit fields:
242 * 27: HW_RF_KILL_SW
243 * Indicates state of (platform's) hardware RF-Kill switch
244 * 26-24: POWER_SAVE_TYPE
245 * Indicates current power-saving mode:
246 * 000 -- No power saving
247 * 001 -- MAC power-down
248 * 010 -- PHY (radio) power-down
249 * 011 -- Error
250 * 9-6: SYS_CONFIG
251 * Indicates current system configuration, reflecting pins on chip
252 * as forced high/low by device circuit board.
253 * 4: GOING_TO_SLEEP
254 * Indicates MAC is entering a power-saving sleep power-down.
255 * Not a good time to access device-internal resources.
256 * 3: MAC_ACCESS_REQ
257 * Host sets this to request and maintain MAC wakeup, to allow host
258 * access to device-internal resources. Host must wait for
259 * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
260 * device registers.
261 * 2: INIT_DONE
262 * Host sets this to put device into fully operational D0 power mode.
263 * Host resets this after SW_RESET to put device into low power mode.
264 * 0: MAC_CLOCK_READY
265 * Indicates MAC (ucode processor, etc.) is powered up and can run.
266 * Internal resources are accessible.
267 * NOTE: This does not indicate that the processor is actually running.
268 * NOTE: This does not indicate that 4965 or 3945 has completed
269 * init or post-power-down restore of internal SRAM memory.
270 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
271 * SRAM is restored and uCode is in normal operation mode.
272 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
273 * do not need to save/restore it.
274 * NOTE: After device reset, this bit remains "0" until host sets
275 * INIT_DONE
276 */
277#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
278#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
279#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
280#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
281
282#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
283
284#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
285#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
286#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
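A sketch of the wake-up handshake these bits implement; the set-bit helper name is assumed from iwl-io.h, and the poll step is left as a comment rather than guessing its exact signature:

	/* request MAC wakeup (helper name assumed from iwl-io.h) */
	iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/* then poll CSR_GP_CNTRL until MAC_CLOCK_READY is set and
	 * GOING_TO_SLEEP is clear before touching non-CSR registers */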
287
288
289/* EEPROM REG */
290#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
291#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
292#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
293#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
294
295/* EEPROM GP */
296#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
297#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
298#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
299#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
300
301/* GP REG */
302#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
303#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
304#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
305#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
306#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
307
308
309/* CSR GIO */
310#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
311
312/*
313 * UCODE-DRIVER GP (general purpose) mailbox register 1
314 * Host driver and uCode write and/or read this register to communicate with
315 * each other.
316 * Bit fields:
317 * 4: UCODE_DISABLE
318 * Host sets this to request permanent halt of uCode, same as
319 * sending CARD_STATE command with "halt" bit set.
320 * 3: CT_KILL_EXIT
321 * Host sets this to request exit from CT_KILL state, i.e. host thinks
322 * device temperature is low enough to continue normal operation.
323 * 2: CMD_BLOCKED
324 * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
325 * to release uCode to clear all Tx and command queues, enter
326 * unassociated mode, and power down.
327 * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
328 * 1: SW_BIT_RFKILL
329 * Host sets this when issuing CARD_STATE command to request
330 * device sleep.
331 * 0: MAC_SLEEP
332 * uCode sets this when preparing a power-saving power-down.
333 * uCode resets this when power-up is complete and SRAM is sane.
334 * NOTE: 3945/4965 saves internal SRAM data to host when powering down,
335 * and must restore this data after powering back up.
336 * MAC_SLEEP is the best indication that restore is complete.
337 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
338 * do not need to save/restore it.
339 */
340#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
341#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
342#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
343#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
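For example, the RF-kill power-down sequence described for bit 2 reduces to one write to the SET mailbox register (a sketch using the iwl_write32() accessor named earlier in this file):

	/* block further host commands; uCode flushes queues and powers down */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);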
344
345/* GIO Chicken Bits (PCI Express bus link power management) */
346#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
347#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
348
349/* LED */
350#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
351#define CSR_LED_REG_TRUN_ON (0x78)
352#define CSR_LED_REG_TRUN_OFF (0x38)
353
354/* ANA_PLL */
355#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
356
357/* HPET MEM debug */
358#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
359
360/* DRAM INT TABLE */
361#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
362#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
363
364/*
365 * HBUS (Host-side Bus)
366 *
367 * HBUS registers are mapped directly into PCI bus space, but are used
368 * to indirectly access device's internal memory or registers that
369 * may be powered-down.
370 *
371 * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
372 * for these registers;
373 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
374 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
375 * internal resources.
376 *
377 * Do not use iwl_write32()/iwl_read32() family to access these registers;
378 * these provide only simple PCI bus access, without waking up the MAC.
379 */
380#define HBUS_BASE (0x400)
381
382/*
383 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
384 * structures, error log, event log, verifying uCode load).
385 * First write to address register, then read from or write to data register
386 * to complete the job. Once the address register is set up, accesses to
387 * data registers auto-increment the address by one dword.
388 * Bit usage for address registers (read or write):
389 * 0-31: memory address within device
390 */
391#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
392#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
393#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
394#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
395
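A sketch of the indirect read sequence the comment above describes, using the direct-access helpers named earlier in this file (the SRAM address is illustrative, and MAC access must already be held):

	u32 value, sram_addr = 0x00800000;	/* illustrative SRAM address */

	/* set the read address, then pull data; the address auto-increments */
	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, sram_addr);
	value = iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);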
396/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
397#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
398#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
399
400/*
401 * Registers for accessing device's internal peripheral registers
402 * (e.g. SCD, BSM, etc.). First write to address register,
403 * then read from or write to data register to complete the job.
404 * Bit usage for address registers (read or write):
405 * 0-15: register address (offset) within device
406 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
407 */
408#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
409#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
410#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
411#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
412
413/*
414 * Per-Tx-queue write pointer (index, really!)
415 * Indicates index to next TFD that driver will fill (1 past latest filled).
416 * Bit usage:
417 * 0-7: queue write index
418 * 11-8: queue selector
419 */
420#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
421
422#endif /* !__iwl_legacy_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
new file mode 100644
index 000000000000..ae13112701bf
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debug.h
@@ -0,0 +1,198 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_debug_h__
30#define __iwl_legacy_debug_h__
31
32struct iwl_priv;
33extern u32 iwlegacy_debug_level;
34
35#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
36#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
39
40#define iwl_print_hex_error(priv, p, len) \
41do { \
42 print_hex_dump(KERN_ERR, "iwl data: ", \
43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
44} while (0)
45
46#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
47#define IWL_DEBUG(__priv, level, fmt, args...) \
48do { \
49 if (iwl_legacy_get_debug_level(__priv) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \
53} while (0)
54
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
56do { \
57 if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \
61} while (0)
62
63#define iwl_print_hex_dump(priv, level, p, len) \
64do { \
65 if (iwl_legacy_get_debug_level(priv) & level) \
66 print_hex_dump(KERN_DEBUG, "iwl data: ", \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0)
69
70#else
71#define IWL_DEBUG(__priv, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
73static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
74 const void *p, u32 len)
75{}
76#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
77
78#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
79int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
81#else
82static inline int
83iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
84{
85 return 0;
86}
87static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
88{
89}
90#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
91
92/*
93 * To use the debug system:
94 *
95 * If you are defining a new debug classification, simply add it to the #define
96 * list here in the form of
97 *
98 * #define IWL_DL_xxxx VALUE
99 *
100 * where xxxx should be the name of the classification (for example, WEP).
101 *
 102 * You then need to either add an IWL_DEBUG_xxxx() macro definition for your
103 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
104 * to send output to that classification.
105 *
106 * The active debug levels can be accessed via files
107 *
 108 * /sys/module/iwl4965/parameters/debug
109 * /sys/module/iwl3945/parameters/debug
110 * /sys/class/net/wlan0/device/debug_level
111 *
112 * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
113 */
114
115/* 0x0000000F - 0x00000001 */
116#define IWL_DL_INFO (1 << 0)
117#define IWL_DL_MAC80211 (1 << 1)
118#define IWL_DL_HCMD (1 << 2)
119#define IWL_DL_STATE (1 << 3)
120/* 0x000000F0 - 0x00000010 */
121#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_HCMD_DUMP (1 << 5)
123#define IWL_DL_EEPROM (1 << 6)
124#define IWL_DL_RADIO (1 << 7)
125/* 0x00000F00 - 0x00000100 */
126#define IWL_DL_POWER (1 << 8)
127#define IWL_DL_TEMP (1 << 9)
128#define IWL_DL_NOTIF (1 << 10)
129#define IWL_DL_SCAN (1 << 11)
130/* 0x0000F000 - 0x00001000 */
131#define IWL_DL_ASSOC (1 << 12)
132#define IWL_DL_DROP (1 << 13)
133#define IWL_DL_TXPOWER (1 << 14)
134#define IWL_DL_AP (1 << 15)
135/* 0x000F0000 - 0x00010000 */
136#define IWL_DL_FW (1 << 16)
137#define IWL_DL_RF_KILL (1 << 17)
138#define IWL_DL_FW_ERRORS (1 << 18)
139#define IWL_DL_LED (1 << 19)
140/* 0x00F00000 - 0x00100000 */
141#define IWL_DL_RATE (1 << 20)
142#define IWL_DL_CALIB (1 << 21)
143#define IWL_DL_WEP (1 << 22)
144#define IWL_DL_TX (1 << 23)
145/* 0x0F000000 - 0x01000000 */
146#define IWL_DL_RX (1 << 24)
147#define IWL_DL_ISR (1 << 25)
148#define IWL_DL_HT (1 << 26)
149#define IWL_DL_IO (1 << 27)
150/* 0xF0000000 - 0x10000000 */
151#define IWL_DL_11H (1 << 28)
152#define IWL_DL_STATS (1 << 29)
153#define IWL_DL_TX_REPLY (1 << 30)
154#define IWL_DL_QOS (1 << 31)
155
156#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
157#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
158#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
159#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
160#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
161#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
162#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
163#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
164#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
165#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
166#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
168#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
169#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
170#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
171#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
172#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
173#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
174 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
175#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
176#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
177#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
178#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
179#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
180 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
181#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
182#define IWL_DEBUG_ASSOC(p, f, a...) \
183 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
184#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
185 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
186#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
187#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
188#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
189 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
190#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
191#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
192 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
193#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
194#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
195#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
196#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
197
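At a call site, usage is then a single line, emitted only when the matching IWL_DL_* bit is set in the debug level (arguments illustrative):

	IWL_DEBUG_SCAN(priv, "Scanning %d channels on band %d\n",
		       n_channels, band);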
198#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
new file mode 100644
index 000000000000..2d32438b4cb8
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
@@ -0,0 +1,1467 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <net/mac80211.h>
30
31
32#include "iwl-dev.h"
33#include "iwl-debug.h"
34#include "iwl-core.h"
35#include "iwl-io.h"
36
37/* create and remove of files */
38#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
39 if (!debugfs_create_file(#name, mode, parent, priv, \
40 &iwl_legacy_dbgfs_##name##_ops)) \
41 goto err; \
42} while (0)
43
44#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
45 struct dentry *__tmp; \
46 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
47 parent, ptr); \
48 if (IS_ERR(__tmp) || !__tmp) \
49 goto err; \
50} while (0)
51
52#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
53 struct dentry *__tmp; \
54 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
55 parent, ptr); \
56 if (IS_ERR(__tmp) || !__tmp) \
57 goto err; \
58} while (0)
59
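These macros are invoked from the debugfs registration routine later in this file; representative calls look like the following (the dir_debug dentry and the file choices are illustrative here):

	DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_X32(debug_level, dir_debug, &priv->debug_level);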
60/* file operation */
61#define DEBUGFS_READ_FUNC(name) \
62static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
63 char __user *user_buf, \
64 size_t count, loff_t *ppos);
65
66#define DEBUGFS_WRITE_FUNC(name) \
67static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
68 const char __user *user_buf, \
69 size_t count, loff_t *ppos);
70
71
72static int
73iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
74{
75 file->private_data = inode->i_private;
76 return 0;
77}
78
79#define DEBUGFS_READ_FILE_OPS(name) \
80 DEBUGFS_READ_FUNC(name); \
81static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
82 .read = iwl_legacy_dbgfs_##name##_read, \
83 .open = iwl_legacy_dbgfs_open_file_generic, \
84 .llseek = generic_file_llseek, \
85};
86
87#define DEBUGFS_WRITE_FILE_OPS(name) \
88 DEBUGFS_WRITE_FUNC(name); \
89static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
90 .write = iwl_legacy_dbgfs_##name##_write, \
91 .open = iwl_legacy_dbgfs_open_file_generic, \
92 .llseek = generic_file_llseek, \
93};
94
95#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
96 DEBUGFS_READ_FUNC(name); \
97 DEBUGFS_WRITE_FUNC(name); \
98static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
99 .write = iwl_legacy_dbgfs_##name##_write, \
100 .read = iwl_legacy_dbgfs_##name##_read, \
101 .open = iwl_legacy_dbgfs_open_file_generic, \
102 .llseek = generic_file_llseek, \
103};
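
Each *_FILE_OPS macro forward-declares the handler(s) and stamps out a matching file_operations instance; for example, DEBUGFS_READ_FILE_OPS(nvm) resolves to roughly the following (again an illustrative expansion of the macros above):

static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos);
static const struct file_operations iwl_legacy_dbgfs_nvm_ops = {
        .read = iwl_legacy_dbgfs_nvm_read,
        .open = iwl_legacy_dbgfs_open_file_generic,
        .llseek = generic_file_llseek,
};
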
104
105static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
106 char __user *user_buf,
107 size_t count, loff_t *ppos) {
108
109 struct iwl_priv *priv = file->private_data;
110 char *buf;
111 int pos = 0;
112
113 int cnt;
114 ssize_t ret;
115 const size_t bufsz = 100 +
116 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
117 buf = kzalloc(bufsz, GFP_KERNEL);
118 if (!buf)
119 return -ENOMEM;
120 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
121 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
122 pos += scnprintf(buf + pos, bufsz - pos,
123 "\t%25s\t\t: %u\n",
124 iwl_legacy_get_mgmt_string(cnt),
125 priv->tx_stats.mgmt[cnt]);
126 }
127 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
128 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
129 pos += scnprintf(buf + pos, bufsz - pos,
130 "\t%25s\t\t: %u\n",
131 iwl_legacy_get_ctrl_string(cnt),
132 priv->tx_stats.ctrl[cnt]);
133 }
134 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
135 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
136 priv->tx_stats.data_cnt);
137 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
138 priv->tx_stats.data_bytes);
139 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
140 kfree(buf);
141 return ret;
142}
143
144static ssize_t
145iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
146 const char __user *user_buf,
147 size_t count, loff_t *ppos)
148{
149 struct iwl_priv *priv = file->private_data;
150 u32 clear_flag;
151 char buf[8];
152 int buf_size;
153
154 memset(buf, 0, sizeof(buf));
155 buf_size = min(count, sizeof(buf) - 1);
156 if (copy_from_user(buf, user_buf, buf_size))
157 return -EFAULT;
158 if (sscanf(buf, "%x", &clear_flag) != 1)
159 return -EFAULT;
160 iwl_legacy_clear_traffic_stats(priv);
161
162 return count;
163}
164
165static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
166 char __user *user_buf,
167 size_t count, loff_t *ppos) {
168
169 struct iwl_priv *priv = file->private_data;
170 char *buf;
171 int pos = 0;
172 int cnt;
173 ssize_t ret;
174 const size_t bufsz = 100 +
175 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
176 buf = kzalloc(bufsz, GFP_KERNEL);
177 if (!buf)
178 return -ENOMEM;
179
180 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
181 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
182 pos += scnprintf(buf + pos, bufsz - pos,
183 "\t%25s\t\t: %u\n",
184 iwl_legacy_get_mgmt_string(cnt),
185 priv->rx_stats.mgmt[cnt]);
186 }
187 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
188 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
189 pos += scnprintf(buf + pos, bufsz - pos,
190 "\t%25s\t\t: %u\n",
191 iwl_legacy_get_ctrl_string(cnt),
192 priv->rx_stats.ctrl[cnt]);
193 }
194 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
195 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
196 priv->rx_stats.data_cnt);
197 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
198 priv->rx_stats.data_bytes);
199
200 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
201 kfree(buf);
202 return ret;
203}
204
205#define BYTE1_MASK 0x000000ff
206#define BYTE2_MASK 0x0000ffff
207#define BYTE3_MASK 0x00ffffff
208static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
209 char __user *user_buf,
210 size_t count, loff_t *ppos)
211{
212 u32 val;
213 char *buf;
214 ssize_t ret;
215 int i;
216 int pos = 0;
217 struct iwl_priv *priv = file->private_data;
218 size_t bufsz;
219
220 /* default is to dump the entire data segment */
221 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
222 priv->dbgfs_sram_offset = 0x800000;
223 if (priv->ucode_type == UCODE_INIT)
224 priv->dbgfs_sram_len = priv->ucode_init_data.len;
225 else
226 priv->dbgfs_sram_len = priv->ucode_data.len;
227 }
228 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
229 buf = kmalloc(bufsz, GFP_KERNEL);
230 if (!buf)
231 return -ENOMEM;
232 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
233 priv->dbgfs_sram_len);
234 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
235 priv->dbgfs_sram_offset);
236 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
237 val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset +
238 priv->dbgfs_sram_len - i);
239 if (i < 4) {
240 switch (i) {
241 case 1:
242 val &= BYTE1_MASK;
243 break;
244 case 2:
245 val &= BYTE2_MASK;
246 break;
247 case 3:
248 val &= BYTE3_MASK;
249 break;
250 }
251 }
252 if (!(i % 16))
253 pos += scnprintf(buf + pos, bufsz - pos, "\n");
254 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
255 }
256 pos += scnprintf(buf + pos, bufsz - pos, "\n");
257
258 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
259 kfree(buf);
260 return ret;
261}
262
263static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
264 const char __user *user_buf,
265 size_t count, loff_t *ppos)
266{
267 struct iwl_priv *priv = file->private_data;
268 char buf[64];
269 int buf_size;
270 u32 offset, len;
271
272 memset(buf, 0, sizeof(buf));
273 buf_size = min(count, sizeof(buf) - 1);
274 if (copy_from_user(buf, user_buf, buf_size))
275 return -EFAULT;
276
277 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
278 priv->dbgfs_sram_offset = offset;
279 priv->dbgfs_sram_len = len;
280 } else {
281 priv->dbgfs_sram_offset = 0;
282 priv->dbgfs_sram_len = 0;
283 }
284
285 return count;
286}
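
Together, the sram write and read handlers form a small two-step interface: write a hex "offset,len" pair, then read back the dump (any other input resets both values, so the next read dumps the whole data segment). A minimal user-space sketch; the debugfs path is hypothetical, since it depends on the phy name and on the directory name passed to iwl_legacy_dbgfs_register():

/* Hypothetical path; "phy0" and "iwlegacy" are placeholders. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int dump_sram(void)
{
        char out[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/ieee80211/phy0/iwlegacy/data/sram",
                      O_RDWR);

        if (fd < 0)
                return -1;
        /* matches the "%x,%x" parser above: offset 0x800000, length 0x100 */
        if (write(fd, "0x800000,0x100", 14) != 14) {
                close(fd);
                return -1;
        }
        while ((n = read(fd, out, sizeof(out))) > 0)
                fwrite(out, 1, n, stdout);
        close(fd);
        return 0;
}
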
287
288static ssize_t
289iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
290 size_t count, loff_t *ppos)
291{
292 struct iwl_priv *priv = file->private_data;
293 struct iwl_station_entry *station;
294 int max_sta = priv->hw_params.max_stations;
295 char *buf;
296 int i, j, pos = 0;
297 ssize_t ret;
298 /* Add 30 for initial string */
299 const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
300
301 buf = kmalloc(bufsz, GFP_KERNEL);
302 if (!buf)
303 return -ENOMEM;
304
305 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
306 priv->num_stations);
307
308 for (i = 0; i < max_sta; i++) {
309 station = &priv->stations[i];
310 if (!station->used)
311 continue;
312 pos += scnprintf(buf + pos, bufsz - pos,
313 "station %d - addr: %pM, flags: %#x\n",
314 i, station->sta.sta.addr,
315 station->sta.station_flags_msk);
316 pos += scnprintf(buf + pos, bufsz - pos,
317 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
318 pos += scnprintf(buf + pos, bufsz - pos,
319 "start_idx\tbitmap\t\t\trate_n_flags\n");
320
321 for (j = 0; j < MAX_TID_COUNT; j++) {
322 pos += scnprintf(buf + pos, bufsz - pos,
323 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
324 j, station->tid[j].seq_number,
325 station->tid[j].agg.txq_id,
326 station->tid[j].agg.frame_count,
327 station->tid[j].tfds_in_queue,
328 station->tid[j].agg.start_idx,
329 station->tid[j].agg.bitmap,
330 station->tid[j].agg.rate_n_flags);
331
332 if (station->tid[j].agg.wait_for_ba)
333 pos += scnprintf(buf + pos, bufsz - pos,
334 " - waitforba");
335 pos += scnprintf(buf + pos, bufsz - pos, "\n");
336 }
337
338 pos += scnprintf(buf + pos, bufsz - pos, "\n");
339 }
340
341 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
342 kfree(buf);
343 return ret;
344}
345
346static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
347 char __user *user_buf,
348 size_t count,
349 loff_t *ppos)
350{
351 ssize_t ret;
352 struct iwl_priv *priv = file->private_data;
353 int pos = 0, ofs = 0, buf_size = 0;
354 const u8 *ptr;
355 char *buf;
356 u16 eeprom_ver;
357 size_t eeprom_len = priv->cfg->base_params->eeprom_size;
358 buf_size = 4 * eeprom_len + 256;
359
360 if (eeprom_len % 16) {
361 IWL_ERR(priv, "NVM size is not multiple of 16.\n");
362 return -ENODATA;
363 }
364
365 ptr = priv->eeprom;
366 if (!ptr) {
367 IWL_ERR(priv, "Invalid EEPROM memory\n");
368 return -ENOMEM;
369 }
370
371 /* 4 characters for byte 0xYY */
372 buf = kzalloc(buf_size, GFP_KERNEL);
373 if (!buf) {
374 IWL_ERR(priv, "Can not allocate Buffer\n");
375 return -ENOMEM;
376 }
377 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
378 pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
379 "version: 0x%x\n", eeprom_ver);
380 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
381 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
382 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
383 buf_size - pos, 0);
384 pos += strlen(buf + pos);
385 if (buf_size - pos > 0)
386 buf[pos++] = '\n';
387 }
388
389 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
390 kfree(buf);
391 return ret;
392}
393
394static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file,
395 char __user *user_buf,
396 size_t count, loff_t *ppos)
397{
398 struct iwl_priv *priv = file->private_data;
399 char *buf;
400 int pos = 0;
401 ssize_t ret = -ENOMEM;
402
403 ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
404 priv, true, &buf, true);
405 if (buf) {
406 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
407 kfree(buf);
408 }
409 return ret;
410}
411
412static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file,
413 const char __user *user_buf,
414 size_t count, loff_t *ppos)
415{
416 struct iwl_priv *priv = file->private_data;
417 u32 event_log_flag;
418 char buf[8];
419 int buf_size;
420
421 memset(buf, 0, sizeof(buf));
422 buf_size = min(count, sizeof(buf) - 1);
423 if (copy_from_user(buf, user_buf, buf_size))
424 return -EFAULT;
425 if (sscanf(buf, "%d", &event_log_flag) != 1)
426 return -EFAULT;
427 if (event_log_flag == 1)
428 priv->cfg->ops->lib->dump_nic_event_log(priv, true,
429 NULL, false);
430
431 return count;
432}
433
434
435
436static ssize_t
437iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
438 size_t count, loff_t *ppos)
439{
440 struct iwl_priv *priv = file->private_data;
441 struct ieee80211_channel *channels = NULL;
442 const struct ieee80211_supported_band *supp_band = NULL;
443 int pos = 0, i, bufsz = PAGE_SIZE;
444 char *buf;
445 ssize_t ret;
446
447 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
448 return -EAGAIN;
449
450 buf = kzalloc(bufsz, GFP_KERNEL);
451 if (!buf) {
452 IWL_ERR(priv, "Can not allocate Buffer\n");
453 return -ENOMEM;
454 }
455
456 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
457 if (supp_band) {
458 channels = supp_band->channels;
459
460 pos += scnprintf(buf + pos, bufsz - pos,
461 "Displaying %d channels in 2.4GHz band 802.11bg):\n",
462 supp_band->n_channels);
463
464 for (i = 0; i < supp_band->n_channels; i++)
465 pos += scnprintf(buf + pos, bufsz - pos,
466 "%d: %ddBm: BSS%s%s, %s.\n",
467 channels[i].hw_value,
468 channels[i].max_power,
469 channels[i].flags & IEEE80211_CHAN_RADAR ?
470 " (IEEE 802.11h required)" : "",
471 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
472 || (channels[i].flags &
473 IEEE80211_CHAN_RADAR)) ? "" :
474 ", IBSS",
475 channels[i].flags &
476 IEEE80211_CHAN_PASSIVE_SCAN ?
477 "passive only" : "active/passive");
478 }
479 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
480 if (supp_band) {
481 channels = supp_band->channels;
482
483 pos += scnprintf(buf + pos, bufsz - pos,
484 "Displaying %d channels in 5.2GHz band (802.11a)\n",
485 supp_band->n_channels);
486
487 for (i = 0; i < supp_band->n_channels; i++)
488 pos += scnprintf(buf + pos, bufsz - pos,
489 "%d: %ddBm: BSS%s%s, %s.\n",
490 channels[i].hw_value,
491 channels[i].max_power,
492 channels[i].flags & IEEE80211_CHAN_RADAR ?
493 " (IEEE 802.11h required)" : "",
494 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
495 || (channels[i].flags &
496 IEEE80211_CHAN_RADAR)) ? "" :
497 ", IBSS",
498 channels[i].flags &
499 IEEE80211_CHAN_PASSIVE_SCAN ?
500 "passive only" : "active/passive");
501 }
502 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
503 kfree(buf);
504 return ret;
505}
506
507static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
508 char __user *user_buf,
509 size_t count, loff_t *ppos) {
510
511 struct iwl_priv *priv = file->private_data;
512 char buf[512];
513 int pos = 0;
514 const size_t bufsz = sizeof(buf);
515
516 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
517 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
518 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
519 test_bit(STATUS_INT_ENABLED, &priv->status));
520 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
521 test_bit(STATUS_RF_KILL_HW, &priv->status));
522 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
523 test_bit(STATUS_CT_KILL, &priv->status));
524 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
525 test_bit(STATUS_INIT, &priv->status));
526 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
527 test_bit(STATUS_ALIVE, &priv->status));
528 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
529 test_bit(STATUS_READY, &priv->status));
530 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
531 test_bit(STATUS_TEMPERATURE, &priv->status));
532 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
533 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
534 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
535 test_bit(STATUS_EXIT_PENDING, &priv->status));
536 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
537 test_bit(STATUS_STATISTICS, &priv->status));
538 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
539 test_bit(STATUS_SCANNING, &priv->status));
540 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
541 test_bit(STATUS_SCAN_ABORTING, &priv->status));
542 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
543 test_bit(STATUS_SCAN_HW, &priv->status));
544 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
545 test_bit(STATUS_POWER_PMI, &priv->status));
546 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
547 test_bit(STATUS_FW_ERROR, &priv->status));
548 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
549}
550
551static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
552 char __user *user_buf,
553 size_t count, loff_t *ppos) {
554
555 struct iwl_priv *priv = file->private_data;
556 int pos = 0;
557 int cnt = 0;
558 char *buf;
559 int bufsz = 24 * 64; /* 24 items * 64 char per item */
560 ssize_t ret;
561
562 buf = kzalloc(bufsz, GFP_KERNEL);
563 if (!buf) {
564 IWL_ERR(priv, "Can not allocate Buffer\n");
565 return -ENOMEM;
566 }
567
568 pos += scnprintf(buf + pos, bufsz - pos,
569 "Interrupt Statistics Report:\n");
570
571 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
572 priv->isr_stats.hw);
573 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
574 priv->isr_stats.sw);
575 if (priv->isr_stats.sw || priv->isr_stats.hw) {
576 pos += scnprintf(buf + pos, bufsz - pos,
577 "\tLast Restarting Code: 0x%X\n",
578 priv->isr_stats.err_code);
579 }
580#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
581 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
582 priv->isr_stats.sch);
583 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
584 priv->isr_stats.alive);
585#endif
586 pos += scnprintf(buf + pos, bufsz - pos,
587 "HW RF KILL switch toggled:\t %u\n",
588 priv->isr_stats.rfkill);
589
590 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
591 priv->isr_stats.ctkill);
592
593 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
594 priv->isr_stats.wakeup);
595
596 pos += scnprintf(buf + pos, bufsz - pos,
597 "Rx command responses:\t\t %u\n",
598 priv->isr_stats.rx);
599 for (cnt = 0; cnt < REPLY_MAX; cnt++) {
600 if (priv->isr_stats.rx_handlers[cnt] > 0)
601 pos += scnprintf(buf + pos, bufsz - pos,
602 "\tRx handler[%36s]:\t\t %u\n",
603 iwl_legacy_get_cmd_string(cnt),
604 priv->isr_stats.rx_handlers[cnt]);
605 }
606
607 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
608 priv->isr_stats.tx);
609
610 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
611 priv->isr_stats.unhandled);
612
613 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
614 kfree(buf);
615 return ret;
616}
617
618static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
619 const char __user *user_buf,
620 size_t count, loff_t *ppos)
621{
622 struct iwl_priv *priv = file->private_data;
623 char buf[8];
624 int buf_size;
625 u32 reset_flag;
626
627 memset(buf, 0, sizeof(buf));
628 buf_size = min(count, sizeof(buf) - 1);
629 if (copy_from_user(buf, user_buf, buf_size))
630 return -EFAULT;
631 if (sscanf(buf, "%x", &reset_flag) != 1)
632 return -EFAULT;
633 if (reset_flag == 0)
634 iwl_legacy_clear_isr_stats(priv);
635
636 return count;
637}
638
639static ssize_t
640iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
641 size_t count, loff_t *ppos)
642{
643 struct iwl_priv *priv = file->private_data;
644 struct iwl_rxon_context *ctx;
645 int pos = 0, i;
646 char buf[256 * NUM_IWL_RXON_CTX];
647 const size_t bufsz = sizeof(buf);
648
649 for_each_context(priv, ctx) {
650 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
651 ctx->ctxid);
652 for (i = 0; i < AC_NUM; i++) {
653 pos += scnprintf(buf + pos, bufsz - pos,
654 "\tcw_min\tcw_max\taifsn\ttxop\n");
655 pos += scnprintf(buf + pos, bufsz - pos,
656 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
657 ctx->qos_data.def_qos_parm.ac[i].cw_min,
658 ctx->qos_data.def_qos_parm.ac[i].cw_max,
659 ctx->qos_data.def_qos_parm.ac[i].aifsn,
660 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
661 }
662 pos += scnprintf(buf + pos, bufsz - pos, "\n");
663 }
664 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
665}
666
667static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
668 const char __user *user_buf,
669 size_t count, loff_t *ppos)
670{
671 struct iwl_priv *priv = file->private_data;
672 char buf[8];
673 int buf_size;
674 int ht40;
675
676 memset(buf, 0, sizeof(buf));
677 buf_size = min(count, sizeof(buf) - 1);
678 if (copy_from_user(buf, user_buf, buf_size))
679 return -EFAULT;
680 if (sscanf(buf, "%d", &ht40) != 1)
681 return -EFAULT;
682 if (!iwl_legacy_is_any_associated(priv))
683 priv->disable_ht40 = ht40 ? true : false;
684 else {
685 IWL_ERR(priv, "Sta associated with AP - "
686 "Change to 40MHz channel support is not allowed\n");
687 return -EINVAL;
688 }
689
690 return count;
691}
692
693static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
694 char __user *user_buf,
695 size_t count, loff_t *ppos)
696{
697 struct iwl_priv *priv = file->private_data;
698 char buf[100];
699 int pos = 0;
700 const size_t bufsz = sizeof(buf);
701
702 pos += scnprintf(buf + pos, bufsz - pos,
703 "11n 40MHz Mode: %s\n",
704 priv->disable_ht40 ? "Disabled" : "Enabled");
705 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
706}
707
708DEBUGFS_READ_WRITE_FILE_OPS(sram);
709DEBUGFS_READ_WRITE_FILE_OPS(log_event);
710DEBUGFS_READ_FILE_OPS(nvm);
711DEBUGFS_READ_FILE_OPS(stations);
712DEBUGFS_READ_FILE_OPS(channels);
713DEBUGFS_READ_FILE_OPS(status);
714DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
715DEBUGFS_READ_FILE_OPS(qos);
716DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
717
718static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
719 char __user *user_buf,
720 size_t count, loff_t *ppos)
721{
722 struct iwl_priv *priv = file->private_data;
723 int pos = 0, ofs = 0;
724 int cnt = 0, entry;
725 struct iwl_tx_queue *txq;
726 struct iwl_queue *q;
727 struct iwl_rx_queue *rxq = &priv->rxq;
728 char *buf;
729 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
730 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
731 const u8 *ptr;
732 ssize_t ret;
733
734 if (!priv->txq) {
735 IWL_ERR(priv, "txq not ready\n");
736 return -EAGAIN;
737 }
738 buf = kzalloc(bufsz, GFP_KERNEL);
739 if (!buf) {
740 IWL_ERR(priv, "Can not allocate buffer\n");
741 return -ENOMEM;
742 }
743 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
744 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
745 txq = &priv->txq[cnt];
746 q = &txq->q;
747 pos += scnprintf(buf + pos, bufsz - pos,
748 "q[%d]: read_ptr: %u, write_ptr: %u\n",
749 cnt, q->read_ptr, q->write_ptr);
750 }
751 if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
752 ptr = priv->tx_traffic;
753 pos += scnprintf(buf + pos, bufsz - pos,
754 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
755 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
756 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
757 entry++, ofs += 16) {
758 pos += scnprintf(buf + pos, bufsz - pos,
759 "0x%.4x ", ofs);
760 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
761 buf + pos, bufsz - pos, 0);
762 pos += strlen(buf + pos);
763 if (bufsz - pos > 0)
764 buf[pos++] = '\n';
765 }
766 }
767 }
768
769 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
770 pos += scnprintf(buf + pos, bufsz - pos,
771 "read: %u, write: %u\n",
772 rxq->read, rxq->write);
773
774 if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
775 ptr = priv->rx_traffic;
776 pos += scnprintf(buf + pos, bufsz - pos,
777 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
778 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
779 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
780 entry++, ofs += 16) {
781 pos += scnprintf(buf + pos, bufsz - pos,
782 "0x%.4x ", ofs);
783 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
784 buf + pos, bufsz - pos, 0);
785 pos += strlen(buf + pos);
786 if (bufsz - pos > 0)
787 buf[pos++] = '\n';
788 }
789 }
790 }
791
792 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
793 kfree(buf);
794 return ret;
795}
796
797static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
798 const char __user *user_buf,
799 size_t count, loff_t *ppos)
800{
801 struct iwl_priv *priv = file->private_data;
802 char buf[8];
803 int buf_size;
804 int traffic_log;
805
806 memset(buf, 0, sizeof(buf));
807 buf_size = min(count, sizeof(buf) - 1);
808 if (copy_from_user(buf, user_buf, buf_size))
809 return -EFAULT;
810 if (sscanf(buf, "%d", &traffic_log) != 1)
811 return -EFAULT;
812 if (traffic_log == 0)
813 iwl_legacy_reset_traffic_log(priv);
814
815 return count;
816}
817
818static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
819 char __user *user_buf,
820 size_t count, loff_t *ppos) {
821
822 struct iwl_priv *priv = file->private_data;
823 struct iwl_tx_queue *txq;
824 struct iwl_queue *q;
825 char *buf;
826 int pos = 0;
827 int cnt;
828 int ret;
829 const size_t bufsz = sizeof(char) * 64 *
830 priv->cfg->base_params->num_of_queues;
831
832 if (!priv->txq) {
833 IWL_ERR(priv, "txq not ready\n");
834 return -EAGAIN;
835 }
836 buf = kzalloc(bufsz, GFP_KERNEL);
837 if (!buf)
838 return -ENOMEM;
839
840 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
841 txq = &priv->txq[cnt];
842 q = &txq->q;
843 pos += scnprintf(buf + pos, bufsz - pos,
844 "hwq %.2d: read=%u write=%u stop=%d"
845 " swq_id=%#.2x (ac %d/hwq %d)\n",
846 cnt, q->read_ptr, q->write_ptr,
847 !!test_bit(cnt, priv->queue_stopped),
848 txq->swq_id, txq->swq_id & 3,
849 (txq->swq_id >> 2) & 0x1f);
850 if (cnt >= 4)
851 continue;
852 /* for the ACs, display the stop count too */
853 pos += scnprintf(buf + pos, bufsz - pos,
854 " stop-count: %d\n",
855 atomic_read(&priv->queue_stop_count[cnt]));
856 }
857 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
858 kfree(buf);
859 return ret;
860}
861
862static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
863 char __user *user_buf,
864 size_t count, loff_t *ppos) {
865
866 struct iwl_priv *priv = file->private_data;
867 struct iwl_rx_queue *rxq = &priv->rxq;
868 char buf[256];
869 int pos = 0;
870 const size_t bufsz = sizeof(buf);
871
872 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
873 rxq->read);
874 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
875 rxq->write);
876 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
877 rxq->free_count);
878 if (rxq->rb_stts) {
879 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
880 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
881 } else {
882 pos += scnprintf(buf + pos, bufsz - pos,
883 "closed_rb_num: Not Allocated\n");
884 }
885 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
886}
887
888static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
889 char __user *user_buf,
890 size_t count, loff_t *ppos)
891{
892 struct iwl_priv *priv = file->private_data;
893 return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
894 user_buf, count, ppos);
895}
896
897static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
898 char __user *user_buf,
899 size_t count, loff_t *ppos)
900{
901 struct iwl_priv *priv = file->private_data;
902 return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
903 user_buf, count, ppos);
904}
905
906static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
907 char __user *user_buf,
908 size_t count, loff_t *ppos)
909{
910 struct iwl_priv *priv = file->private_data;
911 return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
912 user_buf, count, ppos);
913}
914
915static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
916 char __user *user_buf,
917 size_t count, loff_t *ppos) {
918
919 struct iwl_priv *priv = file->private_data;
920 int pos = 0;
921 int cnt = 0;
922 char *buf;
923 int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
924 ssize_t ret;
925 struct iwl_sensitivity_data *data;
926
927 data = &priv->sensitivity_data;
928 buf = kzalloc(bufsz, GFP_KERNEL);
929 if (!buf) {
930 IWL_ERR(priv, "Can not allocate Buffer\n");
931 return -ENOMEM;
932 }
933
934 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
935 data->auto_corr_ofdm);
936 pos += scnprintf(buf + pos, bufsz - pos,
937 "auto_corr_ofdm_mrc:\t\t %u\n",
938 data->auto_corr_ofdm_mrc);
939 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
940 data->auto_corr_ofdm_x1);
941 pos += scnprintf(buf + pos, bufsz - pos,
942 "auto_corr_ofdm_mrc_x1:\t\t %u\n",
943 data->auto_corr_ofdm_mrc_x1);
944 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
945 data->auto_corr_cck);
946 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
947 data->auto_corr_cck_mrc);
948 pos += scnprintf(buf + pos, bufsz - pos,
949 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
950 data->last_bad_plcp_cnt_ofdm);
951 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
952 data->last_fa_cnt_ofdm);
953 pos += scnprintf(buf + pos, bufsz - pos,
954 "last_bad_plcp_cnt_cck:\t\t %u\n",
955 data->last_bad_plcp_cnt_cck);
956 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
957 data->last_fa_cnt_cck);
958 pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
959 data->nrg_curr_state);
960 pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
961 data->nrg_prev_state);
962 pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
963 for (cnt = 0; cnt < 10; cnt++) {
964 pos += scnprintf(buf + pos, bufsz - pos, " %u",
965 data->nrg_value[cnt]);
966 }
967 pos += scnprintf(buf + pos, bufsz - pos, "\n");
968 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
969 for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
970 pos += scnprintf(buf + pos, bufsz - pos, " %u",
971 data->nrg_silence_rssi[cnt]);
972 }
973 pos += scnprintf(buf + pos, bufsz - pos, "\n");
974 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
975 data->nrg_silence_ref);
976 pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
977 data->nrg_energy_idx);
978 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
979 data->nrg_silence_idx);
980 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
981 data->nrg_th_cck);
982 pos += scnprintf(buf + pos, bufsz - pos,
983 "nrg_auto_corr_silence_diff:\t %u\n",
984 data->nrg_auto_corr_silence_diff);
985 pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
986 data->num_in_cck_no_fa);
987 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
988 data->nrg_th_ofdm);
989
990 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
991 kfree(buf);
992 return ret;
993}
994
995
996static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
997 char __user *user_buf,
998 size_t count, loff_t *ppos) {
999
1000 struct iwl_priv *priv = file->private_data;
1001 int pos = 0;
1002 int cnt = 0;
1003 char *buf;
1004 int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
1005 ssize_t ret;
1006 struct iwl_chain_noise_data *data;
1007
1008 data = &priv->chain_noise_data;
1009 buf = kzalloc(bufsz, GFP_KERNEL);
1010 if (!buf) {
1011 IWL_ERR(priv, "Can not allocate Buffer\n");
1012 return -ENOMEM;
1013 }
1014
1015 pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
1016 data->active_chains);
1017 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
1018 data->chain_noise_a);
1019 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
1020 data->chain_noise_b);
1021 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
1022 data->chain_noise_c);
1023 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
1024 data->chain_signal_a);
1025 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
1026 data->chain_signal_b);
1027 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
1028 data->chain_signal_c);
1029 pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
1030 data->beacon_count);
1031
1032 pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
1033 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
1034 pos += scnprintf(buf + pos, bufsz - pos, " %u",
1035 data->disconn_array[cnt]);
1036 }
1037 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1038 pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
1039 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
1040 pos += scnprintf(buf + pos, bufsz - pos, " %u",
1041 data->delta_gain_code[cnt]);
1042 }
1043 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1044 pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
1045 data->radio_write);
1046 pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
1047 data->state);
1048
1049 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1050 kfree(buf);
1051 return ret;
1052}
1053
1054static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
1055 char __user *user_buf,
1056 size_t count, loff_t *ppos)
1057{
1058 struct iwl_priv *priv = file->private_data;
1059 char buf[60];
1060 int pos = 0;
1061 const size_t bufsz = sizeof(buf);
1062 u32 pwrsave_status;
1063
1064 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1065 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1066
1067 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1068 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1069 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1070 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1071 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1072 "error");
1073
1074 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1075}
1076
1077static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
1078 const char __user *user_buf,
1079 size_t count, loff_t *ppos)
1080{
1081 struct iwl_priv *priv = file->private_data;
1082 char buf[8];
1083 int buf_size;
1084 int clear;
1085
1086 memset(buf, 0, sizeof(buf));
1087 buf_size = min(count, sizeof(buf) - 1);
1088 if (copy_from_user(buf, user_buf, buf_size))
1089 return -EFAULT;
1090 if (sscanf(buf, "%d", &clear) != 1)
1091 return -EFAULT;
1092
1093 /* make request to uCode to retrieve statistics information */
1094 mutex_lock(&priv->mutex);
1095 iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
1096 mutex_unlock(&priv->mutex);
1097
1098 return count;
1099}
1100
1101static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file,
1102 char __user *user_buf,
1103 size_t count, loff_t *ppos) {
1104
1105 struct iwl_priv *priv = file->private_data;
1106 int pos = 0;
1107 char buf[128];
1108 const size_t bufsz = sizeof(buf);
1109
1110 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
1111 priv->event_log.ucode_trace ? "On" : "Off");
1112 pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
1113 priv->event_log.non_wraps_count);
1114 pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
1115 priv->event_log.wraps_once_count);
1116 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
1117 priv->event_log.wraps_more_count);
1118
1119 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1120}
1121
1122static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file,
1123 const char __user *user_buf,
1124 size_t count, loff_t *ppos)
1125{
1126 struct iwl_priv *priv = file->private_data;
1127 char buf[8];
1128 int buf_size;
1129 int trace;
1130
1131 memset(buf, 0, sizeof(buf));
1132 buf_size = min(count, sizeof(buf) - 1);
1133 if (copy_from_user(buf, user_buf, buf_size))
1134 return -EFAULT;
1135 if (sscanf(buf, "%d", &trace) != 1)
1136 return -EFAULT;
1137
1138 if (trace) {
1139 priv->event_log.ucode_trace = true;
1140 /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
1141 mod_timer(&priv->ucode_trace,
1142 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
1143 } else {
1144 priv->event_log.ucode_trace = false;
1145 del_timer_sync(&priv->ucode_trace);
1146 }
1147
1148 return count;
1149}
1150
1151static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
1152 char __user *user_buf,
1153 size_t count, loff_t *ppos) {
1154
1155 struct iwl_priv *priv = file->private_data;
1156 int len = 0;
1157 char buf[20];
1158
1159 len = sprintf(buf, "0x%04X\n",
1160 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1161 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1162}
1163
1164static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
1165 char __user *user_buf,
1166 size_t count, loff_t *ppos) {
1167
1168 struct iwl_priv *priv = file->private_data;
1169 int len = 0;
1170 char buf[20];
1171
1172 len = sprintf(buf, "0x%04X\n",
1173 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1174 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1175}
1176
1177static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
1178 char __user *user_buf,
1179 size_t count, loff_t *ppos)
1180{
1181 struct iwl_priv *priv = file->private_data;
1182 char *buf;
1183 int pos = 0;
1184 ssize_t ret = -EFAULT;
1185
1186 if (priv->cfg->ops->lib->dump_fh) {
1187 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
1188 if (buf) {
1189 ret = simple_read_from_buffer(user_buf,
1190 count, ppos, buf, pos);
1191 kfree(buf);
1192 }
1193 }
1194
1195 return ret;
1196}
1197
1198static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
1199 char __user *user_buf,
1200 size_t count, loff_t *ppos) {
1201
1202 struct iwl_priv *priv = file->private_data;
1203 int pos = 0;
1204 char buf[12];
1205 const size_t bufsz = sizeof(buf);
1206
1207 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1208 priv->missed_beacon_threshold);
1209
1210 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1211}
1212
1213static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
1214 const char __user *user_buf,
1215 size_t count, loff_t *ppos)
1216{
1217 struct iwl_priv *priv = file->private_data;
1218 char buf[8];
1219 int buf_size;
1220 int missed;
1221
1222 memset(buf, 0, sizeof(buf));
1223 buf_size = min(count, sizeof(buf) - 1);
1224 if (copy_from_user(buf, user_buf, buf_size))
1225 return -EFAULT;
1226 if (sscanf(buf, "%d", &missed) != 1)
1227 return -EINVAL;
1228
1229 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
1230 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
1231 priv->missed_beacon_threshold =
1232 IWL_MISSED_BEACON_THRESHOLD_DEF;
1233 else
1234 priv->missed_beacon_threshold = missed;
1235
1236 return count;
1237}
1238
1239static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file,
1240 char __user *user_buf,
1241 size_t count, loff_t *ppos) {
1242
1243 struct iwl_priv *priv = file->private_data;
1244 int pos = 0;
1245 char buf[12];
1246 const size_t bufsz = sizeof(buf);
1247
1248 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
1249 priv->cfg->base_params->plcp_delta_threshold);
1250
1251 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1252}
1253
1254static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file,
1255 const char __user *user_buf,
1256 size_t count, loff_t *ppos) {
1257
1258 struct iwl_priv *priv = file->private_data;
1259 char buf[8];
1260 int buf_size;
1261 int plcp;
1262
1263 memset(buf, 0, sizeof(buf));
1264 buf_size = min(count, sizeof(buf) - 1);
1265 if (copy_from_user(buf, user_buf, buf_size))
1266 return -EFAULT;
1267 if (sscanf(buf, "%d", &plcp) != 1)
1268 return -EINVAL;
1269 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
1270 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
1271 priv->cfg->base_params->plcp_delta_threshold =
1272 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
1273 else
1274 priv->cfg->base_params->plcp_delta_threshold = plcp;
1275 return count;
1276}
1277
1278static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
1279 char __user *user_buf,
1280 size_t count, loff_t *ppos) {
1281
1282 struct iwl_priv *priv = file->private_data;
1283 int i, pos = 0;
1284 char buf[300];
1285 const size_t bufsz = sizeof(buf);
1286 struct iwl_force_reset *force_reset;
1287
1288 for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
1289 force_reset = &priv->force_reset[i];
1290 pos += scnprintf(buf + pos, bufsz - pos,
1291 "Force reset method %d\n", i);
1292 pos += scnprintf(buf + pos, bufsz - pos,
1293 "\tnumber of reset request: %d\n",
1294 force_reset->reset_request_count);
1295 pos += scnprintf(buf + pos, bufsz - pos,
1296 "\tnumber of reset request success: %d\n",
1297 force_reset->reset_success_count);
1298 pos += scnprintf(buf + pos, bufsz - pos,
1299 "\tnumber of reset request reject: %d\n",
1300 force_reset->reset_reject_count);
1301 pos += scnprintf(buf + pos, bufsz - pos,
1302 "\treset duration: %lu\n",
1303 force_reset->reset_duration);
1304 }
1305 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1306}
1307
1308static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
1309 const char __user *user_buf,
1310 size_t count, loff_t *ppos) {
1311
1312 struct iwl_priv *priv = file->private_data;
1313 char buf[8];
1314 int buf_size;
1315 int reset, ret;
1316
1317 memset(buf, 0, sizeof(buf));
1318 buf_size = min(count, sizeof(buf) - 1);
1319 if (copy_from_user(buf, user_buf, buf_size))
1320 return -EFAULT;
1321 if (sscanf(buf, "%d", &reset) != 1)
1322 return -EINVAL;
1323 switch (reset) {
1324 case IWL_RF_RESET:
1325 case IWL_FW_RESET:
1326 ret = iwl_legacy_force_reset(priv, reset, true);
1327 break;
1328 default:
1329 return -EINVAL;
1330 }
1331 return ret ? ret : count;
1332}
1333
1334static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
1335 const char __user *user_buf,
1336 size_t count, loff_t *ppos) {
1337
1338 struct iwl_priv *priv = file->private_data;
1339 char buf[8];
1340 int buf_size;
1341 int timeout;
1342
1343 memset(buf, 0, sizeof(buf));
1344 buf_size = min(count, sizeof(buf) - 1);
1345 if (copy_from_user(buf, user_buf, buf_size))
1346 return -EFAULT;
1347 if (sscanf(buf, "%d", &timeout) != 1)
1348 return -EINVAL;
1349 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
1350 timeout = IWL_DEF_WD_TIMEOUT;
1351
1352 priv->cfg->base_params->wd_timeout = timeout;
1353 iwl_legacy_setup_watchdog(priv);
1354 return count;
1355}
1356
1357DEBUGFS_READ_FILE_OPS(rx_statistics);
1358DEBUGFS_READ_FILE_OPS(tx_statistics);
1359DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
1360DEBUGFS_READ_FILE_OPS(rx_queue);
1361DEBUGFS_READ_FILE_OPS(tx_queue);
1362DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
1363DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
1364DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1365DEBUGFS_READ_FILE_OPS(sensitivity);
1366DEBUGFS_READ_FILE_OPS(chain_noise);
1367DEBUGFS_READ_FILE_OPS(power_save_status);
1368DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
1369DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
1370DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
1371DEBUGFS_READ_FILE_OPS(fh_reg);
1372DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
1373DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
1374DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1375DEBUGFS_READ_FILE_OPS(rxon_flags);
1376DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1377DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1378
1379/*
1380 * Create the debugfs files and directories
1381 *
1382 */
1383int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
1384{
1385 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
1386 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1387
1388 dir_drv = debugfs_create_dir(name, phyd);
1389 if (!dir_drv)
1390 return -ENOMEM;
1391
1392 priv->debugfs_dir = dir_drv;
1393
1394 dir_data = debugfs_create_dir("data", dir_drv);
1395 if (!dir_data)
1396 goto err;
1397 dir_rf = debugfs_create_dir("rf", dir_drv);
1398 if (!dir_rf)
1399 goto err;
1400 dir_debug = debugfs_create_dir("debug", dir_drv);
1401 if (!dir_debug)
1402 goto err;
1403
1404 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1405 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1406 DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
1407 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1408 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1409 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1410 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1411 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1412 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1413 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
1414 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
1415 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1416 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1417 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1418 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1419 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
1420 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
1421 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1422 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1423 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
1424 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1425 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1426 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1427 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1428
1429 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1430 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1431 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1432 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1433 if (priv->cfg->base_params->ucode_tracing)
1434 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1435 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1436 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1437 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1438 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1439 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1440 &priv->disable_sens_cal);
1441 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1442 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1443 &priv->disable_chain_noise_cal);
1444 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1445 &priv->disable_tx_power_cal);
1446 return 0;
1447
1448err:
1449 IWL_ERR(priv, "Can't create the debugfs directory\n");
1450 iwl_legacy_dbgfs_unregister(priv);
1451 return -ENOMEM;
1452}
1453EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
1454
1455/**
1456 * Remove the debugfs files and directories
1457 *
1458 */
1459void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
1460{
1461 if (!priv->debugfs_dir)
1462 return;
1463
1464 debugfs_remove_recursive(priv->debugfs_dir);
1465 priv->debugfs_dir = NULL;
1466}
1467EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
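
A caller wires these two entry points into driver bring-up and teardown; a minimal sketch follows, where the directory name "iwlegacy" is a placeholder and the error handling is illustrative only:

/* Sketch: hook the debugfs hierarchy into init/remove paths. */
static void iwl_example_setup_debugfs(struct iwl_priv *priv)
{
        if (iwl_legacy_dbgfs_register(priv, "iwlegacy"))
                IWL_ERR(priv, "failed to create debugfs files, ignoring\n");
}

static void iwl_example_teardown_debugfs(struct iwl_priv *priv)
{
        iwl_legacy_dbgfs_unregister(priv);
}
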
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
new file mode 100644
index 000000000000..9ee849d669f3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -0,0 +1,1426 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_legacy_dev_h__
33#define __iwl_legacy_dev_h__
34
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <linux/leds.h>
38#include <linux/wait.h>
39#include <net/ieee80211_radiotap.h>
40
41#include "iwl-eeprom.h"
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-debug.h"
46#include "iwl-4965-hw.h"
47#include "iwl-3945-hw.h"
48#include "iwl-led.h"
49#include "iwl-power.h"
50#include "iwl-legacy-rs.h"
51
52struct iwl_tx_queue;
53
54/* CT-KILL constants */
55#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
56
57/* Default noise level to report when noise measurement is not available.
58 * This may be because we're:
59 * 1) Not associated (4965, no beacon statistics being sent to driver)
60 * 2) Scanning (noise measurement does not apply to associated channel)
61 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
62 * Use default noise value of -127 ... this is below the range of measurable
63 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
64 * Also, -127 works better than 0 when averaging frames with/without
65 * noise info (e.g. averaging might be done in app); measured dBm values are
66 * always negative ... using a negative value as the default keeps all
67 * averages within an s8's (used in some apps) range of negative values. */
68#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
69
70/*
71 * RTS threshold here is total size [2347] minus 4 FCS bytes
72 * Per spec:
73 * a value of 0 means RTS on all data/management packets
74 * a value > max MSDU size means no RTS
75 * else RTS for data/management frames where MPDU is larger
76 * than RTS value.
77 */
78#define DEFAULT_RTS_THRESHOLD 2347U
79#define MIN_RTS_THRESHOLD 0U
80#define MAX_RTS_THRESHOLD 2347U
81#define MAX_MSDU_SIZE 2304U
82#define MAX_MPDU_SIZE 2346U
83#define DEFAULT_BEACON_INTERVAL 100U
84#define DEFAULT_SHORT_RETRY_LIMIT 7U
85#define DEFAULT_LONG_RETRY_LIMIT 4U
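
The comment above spells out the RTS rule; as a hedged sketch of that decision (an illustrative helper, not a function the driver defines):

/* Illustrative only: should this MPDU be preceded by an RTS exchange? */
static inline bool iwl_example_needs_rts(u32 mpdu_len, u32 rts_threshold)
{
        if (rts_threshold == 0)                 /* RTS for all data/mgmt frames */
                return true;
        if (rts_threshold > MAX_MSDU_SIZE)      /* effectively disables RTS */
                return false;
        return mpdu_len > rts_threshold;
}
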
86
87struct iwl_rx_mem_buffer {
88 dma_addr_t page_dma;
89 struct page *page;
90 struct list_head list;
91};
92
93#define rxb_addr(r) page_address(r->page)
94
95/* defined below */
96struct iwl_device_cmd;
97
98struct iwl_cmd_meta {
99 /* only for SYNC commands, iff the reply skb is wanted */
100 struct iwl_host_cmd *source;
101 /*
102 * only for ASYNC commands
103 * (which is somewhat stupid -- look at iwl-sta.c for instance
104 * which duplicates a bunch of code because the callback isn't
105 * invoked for SYNC commands, if it were and its result passed
106 * through it would be simpler...)
107 */
108 void (*callback)(struct iwl_priv *priv,
109 struct iwl_device_cmd *cmd,
110 struct iwl_rx_packet *pkt);
111
112 /* The CMD_SIZE_HUGE flag bit indicates that the command
113 * structure is stored at the end of the shared queue memory. */
114 u32 flags;
115
116 DEFINE_DMA_UNMAP_ADDR(mapping);
117 DEFINE_DMA_UNMAP_LEN(len);
118};
119
120/*
121 * Generic queue structure
122 *
123 * Contains common data for Rx and Tx queues
124 */
125struct iwl_queue {
126 int n_bd; /* number of BDs in this queue */
127 int write_ptr; /* first empty entry (index), host write pointer */
128 int read_ptr; /* last used entry (index), host read pointer */
129 /* use for monitoring and recovering the stuck queue */
130 dma_addr_t dma_addr; /* physical addr for BD's */
131 int n_window; /* safe queue window */
132 u32 id;
133 int low_mark; /* low watermark, resume queue if free
134 * space more than this */
135 int high_mark; /* high watermark, stop queue if free
136 * space less than this */
137} __packed;
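
read_ptr/write_ptr implement a classic ring buffer over n_bd entries, with low_mark/high_mark used to decide when to stop or wake the queue. A simplified sketch of the free-space calculation (illustrative only, not the driver's actual helper, which also accounts for n_window):

/* Sketch: free slots in the ring, keeping one slot open so write_ptr
 * never catches up with read_ptr. */
static inline int iwl_example_queue_space(const struct iwl_queue *q)
{
        int free = q->read_ptr - q->write_ptr - 1;

        if (free < 0)
                free += q->n_bd;
        return free;            /* compare against low_mark/high_mark */
}
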
138
139/* One for each TFD */
140struct iwl_tx_info {
141 struct sk_buff *skb;
142 struct iwl_rxon_context *ctx;
143};
144
145/**
146 * struct iwl_tx_queue - Tx Queue for DMA
147 * @q: generic Rx/Tx queue descriptor
148 * @tfds: base of circular buffer of TFDs
149 * @cmd: array of command/TX buffer pointers
150 * @meta: array of meta data for each command/tx buffer
152 * @txb: array of per-TFD driver data
153 * @time_stamp: time (in jiffies) of last read_ptr change
154 * @need_update: indicates need to update read/write index
155 * @sched_retry: indicates the queue is HT-aggregation (HT AGG) enabled
156 *
157 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
158 * descriptors) and required locking structures.
159 */
160#define TFD_TX_CMD_SLOTS 256
161#define TFD_CMD_SLOTS 32
162
163struct iwl_tx_queue {
164 struct iwl_queue q;
165 void *tfds;
166 struct iwl_device_cmd **cmd;
167 struct iwl_cmd_meta *meta;
168 struct iwl_tx_info *txb;
169 unsigned long time_stamp;
170 u8 need_update;
171 u8 sched_retry;
172 u8 active;
173 u8 swq_id;
174};
175
176#define IWL_NUM_SCAN_RATES (2)
177
178struct iwl4965_channel_tgd_info {
179 u8 type;
180 s8 max_power;
181};
182
183struct iwl4965_channel_tgh_info {
184 s64 last_radar_time;
185};
186
187#define IWL4965_MAX_RATE (33)
188
189struct iwl3945_clip_group {
190 /* maximum power level to prevent clipping for each rate, derived by
191 * us from this band's saturation power in EEPROM */
192 const s8 clip_powers[IWL_MAX_RATES];
193};
194
195/* current Tx power values to use, one for each rate for each channel.
196 * requested power is limited by:
197 * -- regulatory EEPROM limits for this channel
198 * -- hardware capabilities (clip-powers)
199 * -- spectrum management
200 * -- user preference (e.g. iwconfig)
201 * when requested power is set, base power index must also be set. */
202struct iwl3945_channel_power_info {
203 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
204 s8 power_table_index; /* actual (compensated) index into gain table */
205 s8 base_power_index; /* gain index for power at factory temp. */
206 s8 requested_power; /* power (dBm) requested for this chnl/rate */
207};
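
Per the comment above, the power actually programmed is the most restrictive of the listed limits; a hedged sketch of that combination (parameter names are illustrative):

/* Illustrative only: clamp the requested power to every limit above. */
static inline s8 iwl_example_clamp_txpower(s8 user_dbm, s8 eeprom_limit,
                                           s8 clip_power, s8 spectrum_limit)
{
        return min(min(user_dbm, eeprom_limit),
                   min(clip_power, spectrum_limit));
}
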
208
209/* current scan Tx power values to use, one for each scan rate for each
210 * channel. */
211struct iwl3945_scan_power_info {
212 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
213 s8 power_table_index; /* actual (compensated) index into gain table */
214 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
215};
216
217/*
218 * One for each channel, holds all channel setup data
219 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
220 * with one another!
221 */
222struct iwl_channel_info {
223 struct iwl4965_channel_tgd_info tgd;
224 struct iwl4965_channel_tgh_info tgh;
225 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
226 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
227 * HT40 channel */
228
229 u8 channel; /* channel number */
230 u8 flags; /* flags copied from EEPROM */
231 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
232 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
233 s8 min_power; /* always 0 */
234 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
235
236 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
237 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
238 enum ieee80211_band band;
239
240 /* HT40 channel info */
241 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
242 u8 ht40_flags; /* flags copied from EEPROM */
243 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
244
245 /* Radio/DSP gain settings for each "normal" data Tx rate.
246 * These include, in addition to RF and DSP gain, a few fields for
247 * remembering/modifying gain settings (indexes). */
248 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
249
250 /* Radio/DSP gain settings for each scan rate, for directed scans. */
251 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
252};
253
254#define IWL_TX_FIFO_BK 0 /* shared */
255#define IWL_TX_FIFO_BE 1
256#define IWL_TX_FIFO_VI 2 /* shared */
257#define IWL_TX_FIFO_VO 3
258#define IWL_TX_FIFO_UNUSED -1
259
260/* Minimum number of queues. MAX_NUM is defined in hw specific files.
261 * Set the minimum to accommodate the 4 standard TX queues, 1 command
262 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
263#define IWL_MIN_NUM_QUEUES 10
264
265#define IWL_DEFAULT_CMD_QUEUE_NUM 4
266
267#define IEEE80211_DATA_LEN 2304
268#define IEEE80211_4ADDR_LEN 30
269#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
270#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
271
272struct iwl_frame {
273 union {
274 struct ieee80211_hdr frame;
275 struct iwl_tx_beacon_cmd beacon;
276 u8 raw[IEEE80211_FRAME_LEN];
277 u8 cmd[360];
278 } u;
279 struct list_head list;
280};
281
282#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
283#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
284#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
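
IEEE80211_SCTL_SEQ masks the 12-bit sequence number carried in bits 4..15 of the sequence-control field (bits 0..3 hold the fragment number), so these macros only shift it in and out. A quick compile-time worked example, assuming IEEE80211_SCTL_SEQ == 0xfff0:

/* Worked example: seq_ctrl 0x1230 <-> SN 0x123; SNs wrap at MAX_SN. */
static inline void iwl_example_seq_macro_check(void)
{
        BUILD_BUG_ON(SEQ_TO_SN(0x1230) != 0x123);
        BUILD_BUG_ON(SN_TO_SEQ(0x123) != 0x1230);
        BUILD_BUG_ON(MAX_SN != 0xfff);
}
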
285
286enum {
287 CMD_SYNC = 0,
288 CMD_SIZE_NORMAL = 0,
289 CMD_NO_SKB = 0,
290 CMD_SIZE_HUGE = (1 << 0),
291 CMD_ASYNC = (1 << 1),
292 CMD_WANT_SKB = (1 << 2),
293};
294
295#define DEF_CMD_PAYLOAD_SIZE 320
296
297/**
298 * struct iwl_device_cmd
299 *
300 * For allocation of the command and tx queues, this establishes the overall
301 * size of the largest command we send to uCode, except for a scan command
302 * (which is relatively huge; space is allocated separately).
303 */
304struct iwl_device_cmd {
305 struct iwl_cmd_header hdr; /* uCode API */
306 union {
307 u32 flags;
308 u8 val8;
309 u16 val16;
310 u32 val32;
311 struct iwl_tx_cmd tx;
312 u8 payload[DEF_CMD_PAYLOAD_SIZE];
313 } __packed cmd;
314} __packed;
315
316#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
317
318
319struct iwl_host_cmd {
320 const void *data;
321 unsigned long reply_page;
322 void (*callback)(struct iwl_priv *priv,
323 struct iwl_device_cmd *cmd,
324 struct iwl_rx_packet *pkt);
325 u32 flags;
326 u16 len;
327 u8 id;
328};
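
A host command is issued by filling this descriptor and handing it to the command path in iwl-hcmd.c; a hedged sketch follows, where the command id and payload are placeholders and iwl_legacy_send_cmd() is assumed to be the usual synchronous entry point:

/* Sketch only: wrap a small payload in a synchronous host command. */
static int iwl_example_send_small_cmd(struct iwl_priv *priv, u8 cmd_id,
                                      const void *payload, u16 len)
{
        struct iwl_host_cmd cmd = {
                .id = cmd_id,
                .data = payload,
                .len = len,
                .flags = CMD_SYNC,      /* block until the reply arrives */
        };

        return iwl_legacy_send_cmd(priv, &cmd); /* assumed helper */
}
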
329
330#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
331#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
332#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
333
334/**
335 * struct iwl_rx_queue - Rx queue
336 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
337 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
338 * @read: Shared index to newest available Rx buffer
339 * @write: Shared index to oldest written Rx packet
340 * @free_count: Number of pre-allocated buffers in rx_free
341 * @rx_free: list of free SKBs for use
342 * @rx_used: List of Rx buffers with no SKB
343 * @need_update: flag to indicate we need to update read/write index
344 * @rb_stts: driver's pointer to receive buffer status
345 * @rb_stts_dma: bus address of receive buffer status
346 *
347 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
348 */
349struct iwl_rx_queue {
350 __le32 *bd;
351 dma_addr_t bd_dma;
352 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
353 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
354 u32 read;
355 u32 write;
356 u32 free_count;
357 u32 write_actual;
358 struct list_head rx_free;
359 struct list_head rx_used;
360 int need_update;
361 struct iwl_rb_status *rb_stts;
362 dma_addr_t rb_stts_dma;
363 spinlock_t lock;
364};
365
366#define IWL_SUPPORTED_RATES_IE_LEN 8
367
368#define MAX_TID_COUNT 9
369
370#define IWL_INVALID_RATE 0xFF
371#define IWL_INVALID_VALUE -1
372
373/**
374 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
375 * @txq_id: Tx queue used for Tx attempt
376 * @frame_count: # frames attempted by Tx command
377 * @wait_for_ba: Expect block-ack before next Tx reply
378 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
379 * @bitmap: bitmap of frames pending ACK in the Tx window,
380 *	one bit for each frame
381 * @rate_n_flags: Rate at which Tx was attempted
382 *
383 * If REPLY_TX indicates that aggregation was attempted, driver must wait
384 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
385 * until block ack arrives.
386 */
387struct iwl_ht_agg {
388 u16 txq_id;
389 u16 frame_count;
390 u16 wait_for_ba;
391 u16 start_idx;
392 u64 bitmap;
393 u32 rate_n_flags;
394#define IWL_AGG_OFF 0
395#define IWL_AGG_ON 1
396#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
397#define IWL_EMPTYING_HW_QUEUE_DELBA 3
398 u8 state;
399};
400
401
402struct iwl_tid_data {
403 u16 seq_number; /* 4965 only */
404 u16 tfds_in_queue;
405 struct iwl_ht_agg agg;
406};
407
408struct iwl_hw_key {
409 u32 cipher;
410 int keylen;
411 u8 keyidx;
412 u8 key[32];
413};
414
415union iwl_ht_rate_supp {
416 u16 rates;
417 struct {
418 u8 siso_rate;
419 u8 mimo_rate;
420 };
421};
422
423#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
424#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
425#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
426#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
427#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
428#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
429#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
430
431/*
432 * Maximal MPDU density for TX aggregation
433 * 4 - 2us density
434 * 5 - 4us density
435 * 6 - 8us density
436 * 7 - 16us density
437 */
438#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
439#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
440#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
441#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
442#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
443#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
444#define CFG_HT_MPDU_DENSITY_MIN (0x1)
445
446struct iwl_ht_config {
447 bool single_chain_sufficient;
448 enum ieee80211_smps_mode smps; /* current smps mode */
449};
450
451/* QoS structures */
452struct iwl_qos_info {
453 int qos_active;
454 struct iwl_qosparam_cmd def_qos_parm;
455};
456
457/*
458 * Structure should be accessed with sta_lock held. When station addition
459 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
460 * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
461 * sta_lock held.
462 */
463struct iwl_station_entry {
464 struct iwl_legacy_addsta_cmd sta;
465 struct iwl_tid_data tid[MAX_TID_COUNT];
466 u8 used, ctxid;
467 struct iwl_hw_key keyinfo;
468 struct iwl_link_quality_cmd *lq;
469};
470
471struct iwl_station_priv_common {
472 struct iwl_rxon_context *ctx;
473 u8 sta_id;
474};
475
476/*
477 * iwl_station_priv: Driver's private station information
478 *
479 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
480 * in the structure for use by the driver. This structure is placed in that
481 * space.
482 *
483 * The common struct MUST be first because it is shared between
484 * 3945 and 4965!
485 */
486struct iwl_station_priv {
487 struct iwl_station_priv_common common;
488 struct iwl_lq_sta lq_sta;
489 atomic_t pending_frames;
490 bool client;
491 bool asleep;
492};
493
494/**
495 * struct iwl_vif_priv - driver's private per-interface information
496 *
497 * When mac80211 allocates a virtual interface, it can allocate
498 * space for us to put data into.
499 */
500struct iwl_vif_priv {
501 struct iwl_rxon_context *ctx;
502 u8 ibss_bssid_sta_id;
503};
504
505/* one for each uCode image (inst/data, boot/init/runtime) */
506struct fw_desc {
507 void *v_addr; /* access by driver */
508 dma_addr_t p_addr; /* access by card's busmaster DMA */
509 u32 len; /* bytes */
510};
511
512/* uCode file layout */
513struct iwl_ucode_header {
514 __le32 ver; /* major/minor/API/serial */
515 struct {
516 __le32 inst_size; /* bytes of runtime code */
517 __le32 data_size; /* bytes of runtime data */
518 __le32 init_size; /* bytes of init code */
519 __le32 init_data_size; /* bytes of init data */
520 __le32 boot_size; /* bytes of bootstrap code */
521 u8 data[0]; /* in same order as sizes */
522 } v1;
523};
524
525struct iwl4965_ibss_seq {
526 u8 mac[ETH_ALEN];
527 u16 seq_num;
528 u16 frag_num;
529 unsigned long packet_time;
530 struct list_head list;
531};
532
533struct iwl_sensitivity_ranges {
534 u16 min_nrg_cck;
535 u16 max_nrg_cck;
536
537 u16 nrg_th_cck;
538 u16 nrg_th_ofdm;
539
540 u16 auto_corr_min_ofdm;
541 u16 auto_corr_min_ofdm_mrc;
542 u16 auto_corr_min_ofdm_x1;
543 u16 auto_corr_min_ofdm_mrc_x1;
544
545 u16 auto_corr_max_ofdm;
546 u16 auto_corr_max_ofdm_mrc;
547 u16 auto_corr_max_ofdm_x1;
548 u16 auto_corr_max_ofdm_mrc_x1;
549
550 u16 auto_corr_max_cck;
551 u16 auto_corr_max_cck_mrc;
552 u16 auto_corr_min_cck;
553 u16 auto_corr_min_cck_mrc;
554
555 u16 barker_corr_th_min;
556 u16 barker_corr_th_min_mrc;
557 u16 nrg_th_cca;
558};
559
560
561#define KELVIN_TO_CELSIUS(x) ((x)-273)
562#define CELSIUS_TO_KELVIN(x) ((x)+273)
563
564
565/**
566 * struct iwl_hw_params
567 * @max_txq_num: Max # Tx queues supported
568 * @dma_chnl_num: Number of Tx DMA/FIFO channels
569 * @scd_bc_tbls_size: size of scheduler byte count tables
570 * @tfd_size: TFD size
571 * @tx/rx_chains_num: Number of TX/RX chains
572 * @valid_tx/rx_ant: usable antennas
573 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
574 * @max_rxq_log: Log-base-2 of max_rxq_size
575 * @rx_page_order: Rx buffer page order
576 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
577 * @max_stations: maximum number of stations supported
578 * @ht40_channel: bitmask of bands in which 40MHz width is possible:
579 *	BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ)
580 * @sw_crypto: 0 for hw, 1 for sw
581 * @max_xxx_size: maximum inst/data/bsm uCode image sizes
582 * @ct_kill_threshold: temperature threshold
583 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
584 * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
585 */
586struct iwl_hw_params {
587 u8 max_txq_num;
588 u8 dma_chnl_num;
589 u16 scd_bc_tbls_size;
590 u32 tfd_size;
591 u8 tx_chains_num;
592 u8 rx_chains_num;
593 u8 valid_tx_ant;
594 u8 valid_rx_ant;
595 u16 max_rxq_size;
596 u16 max_rxq_log;
597 u32 rx_page_order;
598 u32 rx_wrt_ptr_reg;
599 u8 max_stations;
600 u8 ht40_channel;
601 u8 max_beacon_itrvl; /* in 1024 ms */
602 u32 max_inst_size;
603 u32 max_data_size;
604 u32 max_bsm_size;
605 u32 ct_kill_threshold; /* value in hw-dependent units */
606 u16 beacon_time_tsf_bits;
607 const struct iwl_sensitivity_ranges *sens;
608};
609
610
611/******************************************************************************
612 *
613 * Functions implemented in core module which are forward declared here
614 * for use by iwl-[4-5].c
615 *
616 * NOTE: The implementation of these functions is not hardware specific,
617 * which is why they are in the core module files.
618 *
619 * Naming convention --
620 * iwl_ <-- Is part of iwlwifi
621 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
622 * iwl4965_bg_ <-- Called from work queue context
623 * iwl4965_mac_ <-- mac80211 callback
624 *
625 ****************************************************************************/
626extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
627extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
628extern int iwl_legacy_queue_space(const struct iwl_queue *q);
629static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
630{
631 return q->write_ptr >= q->read_ptr ?
632 (i >= q->read_ptr && i < q->write_ptr) :
633 !(i < q->read_ptr && i >= q->write_ptr);
634}
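
The wrap-around test in iwl_legacy_queue_used() is easier to follow with concrete numbers. Below is a standalone sketch (illustrative only, not driver code) that exercises both the non-wrapped and the wrapped case of the circular TFD queue:

	#include <assert.h>

	/* mirrors iwl_legacy_queue_used(): is slot i between read_ptr and write_ptr? */
	static int queue_used(int read_ptr, int write_ptr, int i)
	{
		return write_ptr >= read_ptr ?
			(i >= read_ptr && i < write_ptr) :
			!(i < read_ptr && i >= write_ptr);
	}

	int main(void)
	{
		assert(queue_used(2, 5, 3));	/* no wrap: [2, 5) contains 3 */
		assert(!queue_used(2, 5, 7));
		assert(queue_used(6, 2, 7));	/* wrapped: used slots are [6, N) and [0, 2) */
		assert(!queue_used(6, 2, 4));
		return 0;
	}
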
635
636
637static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
638 int is_huge)
639{
640	/*
641	 * This is for the init calibration result and the scan command,
642	 * which require a buffer > TFD_MAX_PAYLOAD_SIZE and therefore
643	 * use the big buffer at the end of the command array.
644	 */
645 if (is_huge)
646 return q->n_window; /* must be power of 2 */
647
648 /* Otherwise, use normal size buffers */
649 return index & (q->n_window - 1);
650}
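
A hedged standalone sketch of the index math above, assuming a command window of 32 entries (the real n_window comes from the queue setup code elsewhere in the driver): normal commands wrap within the power-of-two window, while huge commands always use the single oversized slot at index n_window.

	#include <assert.h>

	/* mirrors iwl_legacy_get_cmd_index(); n_window must be a power of 2 */
	static int get_cmd_index(int n_window, unsigned int index, int is_huge)
	{
		if (is_huge)
			return n_window;		/* the one big buffer past the window */
		return index & (n_window - 1);		/* normal commands wrap in the window */
	}

	int main(void)
	{
		assert(get_cmd_index(32, 35, 0) == 3);	/* index 35 wraps to slot 3 */
		assert(get_cmd_index(32, 35, 1) == 32);	/* huge commands use slot n_window */
		return 0;
	}
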
651
652
653struct iwl_dma_ptr {
654 dma_addr_t dma;
655 void *addr;
656 size_t size;
657};
658
659#define IWL_OPERATION_MODE_AUTO 0
660#define IWL_OPERATION_MODE_HT_ONLY 1
661#define IWL_OPERATION_MODE_MIXED 2
662#define IWL_OPERATION_MODE_20MHZ 3
663
664#define IWL_TX_CRC_SIZE 4
665#define IWL_TX_DELIMITER_SIZE 4
666
667#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
668
669/* Sensitivity and chain noise calibration */
670#define INITIALIZATION_VALUE 0xFFFF
671#define IWL4965_CAL_NUM_BEACONS 20
672#define IWL_CAL_NUM_BEACONS 16
673#define MAXIMUM_ALLOWED_PATHLOSS 15
674
675#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
676
677#define MAX_FA_OFDM 50
678#define MIN_FA_OFDM 5
679#define MAX_FA_CCK 50
680#define MIN_FA_CCK 5
681
682#define AUTO_CORR_STEP_OFDM 1
683
684#define AUTO_CORR_STEP_CCK 3
685#define AUTO_CORR_MAX_TH_CCK 160
686
687#define NRG_DIFF 2
688#define NRG_STEP_CCK 2
689#define NRG_MARGIN 8
690#define MAX_NUMBER_CCK_NO_FA 100
691
692#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
693
694#define CHAIN_A 0
695#define CHAIN_B 1
696#define CHAIN_C 2
697#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
698#define ALL_BAND_FILTER 0xFF00
699#define IN_BAND_FILTER 0xFF
700#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
701
702#define NRG_NUM_PREV_STAT_L 20
703#define NUM_RX_CHAINS 3
704
705enum iwl4965_false_alarm_state {
706 IWL_FA_TOO_MANY = 0,
707 IWL_FA_TOO_FEW = 1,
708 IWL_FA_GOOD_RANGE = 2,
709};
710
711enum iwl4965_chain_noise_state {
712 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
713 IWL_CHAIN_NOISE_ACCUMULATE,
714 IWL_CHAIN_NOISE_CALIBRATED,
715 IWL_CHAIN_NOISE_DONE,
716};
717
718enum iwl4965_calib_enabled_state {
719 IWL_CALIB_DISABLED = 0, /* must be 0 */
720 IWL_CALIB_ENABLED = 1,
721};
722
723/*
724 * enum iwl_calib
725 * defines the order in which results of initial calibrations
726 * should be sent to the runtime uCode
727 */
728enum iwl_calib {
729 IWL_CALIB_MAX,
730};
731
732/* Opaque calibration results */
733struct iwl_calib_result {
734 void *buf;
735 size_t buf_len;
736};
737
738enum ucode_type {
739 UCODE_NONE = 0,
740 UCODE_INIT,
741 UCODE_RT
742};
743
744/* Sensitivity calib data */
745struct iwl_sensitivity_data {
746 u32 auto_corr_ofdm;
747 u32 auto_corr_ofdm_mrc;
748 u32 auto_corr_ofdm_x1;
749 u32 auto_corr_ofdm_mrc_x1;
750 u32 auto_corr_cck;
751 u32 auto_corr_cck_mrc;
752
753 u32 last_bad_plcp_cnt_ofdm;
754 u32 last_fa_cnt_ofdm;
755 u32 last_bad_plcp_cnt_cck;
756 u32 last_fa_cnt_cck;
757
758 u32 nrg_curr_state;
759 u32 nrg_prev_state;
760 u32 nrg_value[10];
761 u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
762 u32 nrg_silence_ref;
763 u32 nrg_energy_idx;
764 u32 nrg_silence_idx;
765 u32 nrg_th_cck;
766 s32 nrg_auto_corr_silence_diff;
767 u32 num_in_cck_no_fa;
768 u32 nrg_th_ofdm;
769
770 u16 barker_corr_th_min;
771 u16 barker_corr_th_min_mrc;
772 u16 nrg_th_cca;
773};
774
775/* Chain noise (differential Rx gain) calib data */
776struct iwl_chain_noise_data {
777 u32 active_chains;
778 u32 chain_noise_a;
779 u32 chain_noise_b;
780 u32 chain_noise_c;
781 u32 chain_signal_a;
782 u32 chain_signal_b;
783 u32 chain_signal_c;
784 u16 beacon_count;
785 u8 disconn_array[NUM_RX_CHAINS];
786 u8 delta_gain_code[NUM_RX_CHAINS];
787 u8 radio_write;
788 u8 state;
789};
790
791#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
792#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
793
794#define IWL_TRAFFIC_ENTRIES (256)
795#define IWL_TRAFFIC_ENTRY_SIZE (64)
796
797enum {
798 MEASUREMENT_READY = (1 << 0),
799 MEASUREMENT_ACTIVE = (1 << 1),
800};
801
802/* interrupt statistics */
803struct isr_statistics {
804 u32 hw;
805 u32 sw;
806 u32 err_code;
807 u32 sch;
808 u32 alive;
809 u32 rfkill;
810 u32 ctkill;
811 u32 wakeup;
812 u32 rx;
813 u32 rx_handlers[REPLY_MAX];
814 u32 tx;
815 u32 unhandled;
816};
817
818/* management statistics */
819enum iwl_mgmt_stats {
820 MANAGEMENT_ASSOC_REQ = 0,
821 MANAGEMENT_ASSOC_RESP,
822 MANAGEMENT_REASSOC_REQ,
823 MANAGEMENT_REASSOC_RESP,
824 MANAGEMENT_PROBE_REQ,
825 MANAGEMENT_PROBE_RESP,
826 MANAGEMENT_BEACON,
827 MANAGEMENT_ATIM,
828 MANAGEMENT_DISASSOC,
829 MANAGEMENT_AUTH,
830 MANAGEMENT_DEAUTH,
831 MANAGEMENT_ACTION,
832 MANAGEMENT_MAX,
833};
834/* control statistics */
835enum iwl_ctrl_stats {
836 CONTROL_BACK_REQ = 0,
837 CONTROL_BACK,
838 CONTROL_PSPOLL,
839 CONTROL_RTS,
840 CONTROL_CTS,
841 CONTROL_ACK,
842 CONTROL_CFEND,
843 CONTROL_CFENDACK,
844 CONTROL_MAX,
845};
846
847struct traffic_stats {
848#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
849 u32 mgmt[MANAGEMENT_MAX];
850 u32 ctrl[CONTROL_MAX];
851 u32 data_cnt;
852 u64 data_bytes;
853#endif
854};
855
856/*
857 * iwl_switch_rxon: "channel switch" structure
858 *
859 * @switch_in_progress: channel switch in progress
860 * @channel: new channel
861 */
862struct iwl_switch_rxon {
863 bool switch_in_progress;
864 __le16 channel;
865};
866
867/*
868 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
869 * to perform continuous uCode event logging operation if enabled
870 */
871#define UCODE_TRACE_PERIOD (100)
872
873/*
874 * iwl_event_log: current uCode event log position
875 *
876 * @ucode_trace: enable/disable ucode continuous trace timer
877 * @num_wraps: how many times the event buffer wraps
878 * @next_entry: the entry just before the next one that uCode would fill
879 * @non_wraps_count: number of event dumps in which no wrap was detected
880 * @wraps_once_count: number of event dumps in which one wrap was detected
881 * @wraps_more_count: number of event dumps in which more than one wrap
882 *	was detected
883 */
884struct iwl_event_log {
885 bool ucode_trace;
886 u32 num_wraps;
887 u32 next_entry;
888 int non_wraps_count;
889 int wraps_once_count;
890 int wraps_more_count;
891};
892
893/*
894 * Host interrupt timeout values, used when setting the interrupt
895 * coalescing timer. CSR_INT_COALESCING is an 8-bit register counting
896 * in 32-usec units.
897 *
898 * The default interrupt coalescing timer is 64 x 32 = 2048 usecs.
899 * The default interrupt coalescing calibration timer is 16 x 32 = 512 usecs.
900 */
901#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
902#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
903#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
904#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
905#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
906#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
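
A small self-contained check of the arithmetic in the comment above: CSR_INT_COALESCING counts in 32-usec units, so the defaults of 0x40 and 0x10 correspond to 2048 and 512 usecs. The unit macro name below is made up for this sketch; only the two _DEF values come from the header.

	#include <stdio.h>

	#define IWL_HOST_INT_TIMEOUT_DEF	(0x40)
	#define IWL_HOST_INT_CALIB_TIMEOUT_DEF	(0x10)
	#define COALESCING_UNIT_USEC		32	/* hypothetical name for the 32-usec unit */

	int main(void)
	{
		printf("default timeout:       %d usec\n",
		       IWL_HOST_INT_TIMEOUT_DEF * COALESCING_UNIT_USEC);	/* 2048 */
		printf("default calib timeout: %d usec\n",
		       IWL_HOST_INT_CALIB_TIMEOUT_DEF * COALESCING_UNIT_USEC);	/* 512 */
		return 0;
	}
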
907
908/*
909 * These are the threshold values for the PLCP error rate per 100 ms.
910 * They are used to set and to check the validity of plcp_delta.
911 */
912#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
913#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
914#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
915#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
916#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
917#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
918
919#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
920#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
921
922/* TX queue watchdog timeouts in milliseconds */
923#define IWL_DEF_WD_TIMEOUT (2000)
924#define IWL_LONG_WD_TIMEOUT (10000)
925#define IWL_MAX_WD_TIMEOUT (120000)
926
927enum iwl_reset {
928 IWL_RF_RESET = 0,
929 IWL_FW_RESET,
930 IWL_MAX_FORCE_RESET,
931};
932
933struct iwl_force_reset {
934 int reset_request_count;
935 int reset_success_count;
936 int reset_reject_count;
937 unsigned long reset_duration;
938 unsigned long last_force_reset_jiffies;
939};
940
941/* extend beacon time format bit shifting */
942/*
943 * for _3945 devices
944 * bits 31:24 - extended
945 * bits 23:0 - interval
946 */
947#define IWL3945_EXT_BEACON_TIME_POS 24
948/*
949 * for _4965 devices
950 * bits 31:22 - extended
951 * bits 21:0 - interval
952 */
953#define IWL4965_EXT_BEACON_TIME_POS 22
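
A hedged sketch of how these shift values split one 32-bit beacon-time word into an extended part and an interval part, with the boundary differing between the 3945 and 4965 families. The pack helper below is illustrative only and not part of the driver.

	#include <stdio.h>

	#define IWL3945_EXT_BEACON_TIME_POS	24	/* bits 31:24 extended, 23:0 interval */
	#define IWL4965_EXT_BEACON_TIME_POS	22	/* bits 31:22 extended, 21:0 interval */

	static unsigned int pack_beacon_time(unsigned int ext, unsigned int interval, int pos)
	{
		unsigned int interval_mask = (1u << pos) - 1;

		return (ext << pos) | (interval & interval_mask);
	}

	int main(void)
	{
		/* same logical values, packed differently per device family */
		printf("3945: 0x%08x\n", pack_beacon_time(5, 1024, IWL3945_EXT_BEACON_TIME_POS));
		printf("4965: 0x%08x\n", pack_beacon_time(5, 1024, IWL4965_EXT_BEACON_TIME_POS));
		return 0;
	}
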
954
955enum iwl_rxon_context_id {
956 IWL_RXON_CTX_BSS,
957
958 NUM_IWL_RXON_CTX
959};
960
961struct iwl_rxon_context {
962 struct ieee80211_vif *vif;
963
964 const u8 *ac_to_fifo;
965 const u8 *ac_to_queue;
966 u8 mcast_queue;
967
968 /*
969	 * We could use the vif to indicate whether the context is active,
970	 * but the context also needs to stay active while it is being
971	 * disabled, after the vif has already been removed for type setting.
972 */
973 bool always_active, is_active;
974
975 bool ht_need_multiple_chains;
976
977 enum iwl_rxon_context_id ctxid;
978
979 u32 interface_modes, exclusive_interface_modes;
980 u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
981
982 /*
983 * We declare this const so it can only be
984 * changed via explicit cast within the
985 * routines that actually update the physical
986 * hardware.
987 */
988 const struct iwl_legacy_rxon_cmd active;
989 struct iwl_legacy_rxon_cmd staging;
990
991 struct iwl_rxon_time_cmd timing;
992
993 struct iwl_qos_info qos_data;
994
995 u8 bcast_sta_id, ap_sta_id;
996
997 u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
998 u8 qos_cmd;
999 u8 wep_key_cmd;
1000
1001 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1002 u8 key_mapping_keys;
1003
1004 __le32 station_flags;
1005
1006 struct {
1007 bool non_gf_sta_present;
1008 u8 protection;
1009 bool enabled, is_40mhz;
1010 u8 extension_chan_offset;
1011 } ht;
1012};
1013
1014struct iwl_priv {
1015
1016 /* ieee device used by generic ieee processing code */
1017 struct ieee80211_hw *hw;
1018 struct ieee80211_channel *ieee_channels;
1019 struct ieee80211_rate *ieee_rates;
1020 struct iwl_cfg *cfg;
1021
1022 /* temporary frame storage list */
1023 struct list_head free_frames;
1024 int frames_count;
1025
1026 enum ieee80211_band band;
1027 int alloc_rxb_page;
1028
1029 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
1030 struct iwl_rx_mem_buffer *rxb);
1031
1032 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
1033
1034 /* spectrum measurement report caching */
1035 struct iwl_spectrum_notification measure_report;
1036 u8 measurement_status;
1037
1038 /* ucode beacon time */
1039 u32 ucode_beacon_time;
1040 int missed_beacon_threshold;
1041
1042 /* track IBSS manager (last beacon) status */
1043 u32 ibss_manager;
1044
1045 /* storing the jiffies when the plcp error rate is received */
1046 unsigned long plcp_jiffies;
1047
1048 /* force reset */
1049 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1050
1051	/* We allocate an array of iwl_channel_info for the NIC's valid channels.
1052	 * Access is by channel number, via an indirect index array. */
1053 struct iwl_channel_info *channel_info; /* channel info array */
1054 u8 channel_count; /* # of channels */
1055
1056 /* thermal calibration */
1057 s32 temperature; /* degrees Kelvin */
1058 s32 last_temperature;
1059
1060 /* init calibration results */
1061 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1062
1063 /* Scan related variables */
1064 unsigned long scan_start;
1065 unsigned long scan_start_tsf;
1066 void *scan_cmd;
1067 enum ieee80211_band scan_band;
1068 struct cfg80211_scan_request *scan_request;
1069 struct ieee80211_vif *scan_vif;
1070 bool is_internal_short_scan;
1071 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1072 u8 mgmt_tx_ant;
1073
1074 /* spinlock */
1075 spinlock_t lock; /* protect general shared data */
1076 spinlock_t hcmd_lock; /* protect hcmd */
1077 spinlock_t reg_lock; /* protect hw register access */
1078 struct mutex mutex;
1079 struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
1080
1081 /* basic pci-network driver stuff */
1082 struct pci_dev *pci_dev;
1083
1084 /* pci hardware address support */
1085 void __iomem *hw_base;
1086 u32 hw_rev;
1087 u32 hw_wa_rev;
1088 u8 rev_id;
1089
1090 /* microcode/device supports multiple contexts */
1091 u8 valid_contexts;
1092
1093 /* command queue number */
1094 u8 cmd_queue;
1095
1096 /* max number of station keys */
1097 u8 sta_key_max_num;
1098
1099 /* EEPROM MAC addresses */
1100 struct mac_address addresses[1];
1101
1102 /* uCode images, save to reload in case of failure */
1103 int fw_index; /* firmware we're trying to load */
1104 u32 ucode_ver; /* version of ucode, copy of
1105 iwl_ucode.ver */
1106 struct fw_desc ucode_code; /* runtime inst */
1107 struct fw_desc ucode_data; /* runtime data original */
1108 struct fw_desc ucode_data_backup; /* runtime data save/restore */
1109 struct fw_desc ucode_init; /* initialization inst */
1110 struct fw_desc ucode_init_data; /* initialization data */
1111 struct fw_desc ucode_boot; /* bootstrap inst */
1112 enum ucode_type ucode_type;
1113 u8 ucode_write_complete; /* the image write is complete */
1114 char firmware_name[25];
1115
1116 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1117
1118 struct iwl_switch_rxon switch_rxon;
1119
1120 /* 1st responses from initialize and runtime uCode images.
1121 * _4965's initialize alive response contains some calibration data. */
1122 struct iwl_init_alive_resp card_alive_init;
1123 struct iwl_alive_resp card_alive;
1124
1125 u16 active_rate;
1126
1127 u8 start_calib;
1128 struct iwl_sensitivity_data sensitivity_data;
1129 struct iwl_chain_noise_data chain_noise_data;
1130 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1131
1132 struct iwl_ht_config current_ht_config;
1133
1134 /* Rate scaling data */
1135 u8 retry_rate;
1136
1137 wait_queue_head_t wait_command_queue;
1138
1139 int activity_timer_active;
1140
1141 /* Rx and Tx DMA processing queues */
1142 struct iwl_rx_queue rxq;
1143 struct iwl_tx_queue *txq;
1144 unsigned long txq_ctx_active_msk;
1145 struct iwl_dma_ptr kw; /* keep warm address */
1146 struct iwl_dma_ptr scd_bc_tbls;
1147
1148 u32 scd_base_addr; /* scheduler sram base address */
1149
1150 unsigned long status;
1151
1152 /* counts mgmt, ctl, and data packets */
1153 struct traffic_stats tx_stats;
1154 struct traffic_stats rx_stats;
1155
1156 /* counts interrupts */
1157 struct isr_statistics isr_stats;
1158
1159 struct iwl_power_mgr power_data;
1160
1161 /* context information */
1162 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1163
1164 /* station table variables */
1165
1166 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1167 spinlock_t sta_lock;
1168 int num_stations;
1169 struct iwl_station_entry stations[IWL_STATION_COUNT];
1170 unsigned long ucode_key_table;
1171
1172 /* queue refcounts */
1173#define IWL_MAX_HW_QUEUES 32
1174 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1175 /* for each AC */
1176 atomic_t queue_stop_count[4];
1177
1178 /* Indication if ieee80211_ops->open has been called */
1179 u8 is_open;
1180
1181 u8 mac80211_registered;
1182
1183 /* eeprom -- this is in the card's little endian byte order */
1184 u8 *eeprom;
1185 struct iwl_eeprom_calib_info *calib_info;
1186
1187 enum nl80211_iftype iw_mode;
1188
1189 /* Last Rx'd beacon timestamp */
1190 u64 timestamp;
1191
1192 union {
1193#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
1194 struct {
1195 void *shared_virt;
1196 dma_addr_t shared_phys;
1197
1198 struct delayed_work thermal_periodic;
1199 struct delayed_work rfkill_poll;
1200
1201 struct iwl3945_notif_statistics statistics;
1202#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1203 struct iwl3945_notif_statistics accum_statistics;
1204 struct iwl3945_notif_statistics delta_statistics;
1205 struct iwl3945_notif_statistics max_delta;
1206#endif
1207
1208 u32 sta_supp_rates;
1209 int last_rx_rssi; /* From Rx packet statistics */
1210
1211 /* Rx'd packet timing information */
1212 u32 last_beacon_time;
1213 u64 last_tsf;
1214
1215 /*
1216 * each calibration channel group in the
1217 * EEPROM has a derived clip setting for
1218 * each rate.
1219 */
1220 const struct iwl3945_clip_group clip_groups[5];
1221
1222 } _3945;
1223#endif
1224#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
1225 struct {
1226 /*
1227		 * number of TIDs that have aggregation (AGG) turned on;
1228		 * 0 means no aggregation
1229 */
1230 u8 agg_tids_count;
1231
1232 struct iwl_rx_phy_res last_phy_res;
1233 bool last_phy_res_valid;
1234
1235 struct completion firmware_loading_complete;
1236
1237 /*
1238		 * The chain noise reset and gain commands are the
1239		 * two extra calibration commands that follow the
1240		 * standard phy calibration commands.
1241 */
1242 u8 phy_calib_chain_noise_reset_cmd;
1243 u8 phy_calib_chain_noise_gain_cmd;
1244
1245 struct iwl_notif_statistics statistics;
1246#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1247 struct iwl_notif_statistics accum_statistics;
1248 struct iwl_notif_statistics delta_statistics;
1249 struct iwl_notif_statistics max_delta;
1250#endif
1251
1252 } _4965;
1253#endif
1254 };
1255
1256 struct iwl_hw_params hw_params;
1257
1258 u32 inta_mask;
1259
1260 struct workqueue_struct *workqueue;
1261
1262 struct work_struct restart;
1263 struct work_struct scan_completed;
1264 struct work_struct rx_replenish;
1265 struct work_struct abort_scan;
1266
1267 struct iwl_rxon_context *beacon_ctx;
1268 struct sk_buff *beacon_skb;
1269
1270 struct work_struct start_internal_scan;
1271 struct work_struct tx_flush;
1272
1273 struct tasklet_struct irq_tasklet;
1274
1275 struct delayed_work init_alive_start;
1276 struct delayed_work alive_start;
1277 struct delayed_work scan_check;
1278
1279 /* TX Power */
1280 s8 tx_power_user_lmt;
1281 s8 tx_power_device_lmt;
1282 s8 tx_power_next;
1283
1284
1285#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1286 /* debugging info */
1287	u32 debug_level; /* per-device debug level; if set, it overrides
1288			    the global iwlegacy_debug_level */
1289#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1290#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1291 /* debugfs */
1292 u16 tx_traffic_idx;
1293 u16 rx_traffic_idx;
1294 u8 *tx_traffic;
1295 u8 *rx_traffic;
1296 struct dentry *debugfs_dir;
1297 u32 dbgfs_sram_offset, dbgfs_sram_len;
1298 bool disable_ht40;
1299#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
1300
1301 struct work_struct txpower_work;
1302 u32 disable_sens_cal;
1303 u32 disable_chain_noise_cal;
1304 u32 disable_tx_power_cal;
1305 struct work_struct run_time_calib_work;
1306 struct timer_list statistics_periodic;
1307 struct timer_list ucode_trace;
1308 struct timer_list watchdog;
1309 bool hw_ready;
1310
1311 struct iwl_event_log event_log;
1312
1313 struct led_classdev led;
1314 unsigned long blink_on, blink_off;
1315 bool led_registered;
1316}; /*iwl_priv */
1317
1318static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1319{
1320 set_bit(txq_id, &priv->txq_ctx_active_msk);
1321}
1322
1323static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1324{
1325 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1326}
1327
1328#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1329/*
1330 * iwl_legacy_get_debug_level: Return active debug level for device
1331 *
1332 * Using sysfs it is possible to set a per-device debug level. If set,
1333 * that level is used; otherwise the global debug level, which can be
1334 * set via a module parameter, is used.
1335 */
1336static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1337{
1338 if (priv->debug_level)
1339 return priv->debug_level;
1340 else
1341 return iwlegacy_debug_level;
1342}
1343#else
1344static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1345{
1346 return iwlegacy_debug_level;
1347}
1348#endif
1349
1350
1351static inline struct ieee80211_hdr *
1352iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
1353 int txq_id, int idx)
1354{
1355 if (priv->txq[txq_id].txb[idx].skb)
1356 return (struct ieee80211_hdr *)priv->txq[txq_id].
1357 txb[idx].skb->data;
1358 return NULL;
1359}
1360
1361static inline struct iwl_rxon_context *
1362iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1363{
1364 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1365
1366 return vif_priv->ctx;
1367}
1368
1369#define for_each_context(priv, ctx) \
1370 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
1371 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
1372 if (priv->valid_contexts & BIT(ctx->ctxid))
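
For clarity, a standalone mock of the iteration pattern that for_each_context() implements: walk every slot in the contexts array, but run the body only for contexts whose bit is set in valid_contexts. The names below are hypothetical stand-ins; only the shape of the macro mirrors the real one.

	#include <stdio.h>

	enum mock_ctx_id { MOCK_CTX_BSS, NUM_MOCK_CTX };

	struct mock_ctx { enum mock_ctx_id ctxid; };
	struct mock_priv {
		unsigned int valid_contexts;
		struct mock_ctx contexts[NUM_MOCK_CTX];
	};

	#define for_each_mock_context(priv, ctx)			\
		for (ctx = &(priv)->contexts[MOCK_CTX_BSS];		\
		     ctx < &(priv)->contexts[NUM_MOCK_CTX]; ctx++)	\
			if ((priv)->valid_contexts & (1u << (ctx)->ctxid))

	int main(void)
	{
		struct mock_priv priv = { .valid_contexts = 1u << MOCK_CTX_BSS };
		struct mock_ctx *ctx;

		for_each_mock_context(&priv, ctx)
			printf("context %d is valid\n", (int)ctx->ctxid);
		return 0;
	}
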
1373
1374static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
1375 enum iwl_rxon_context_id ctxid)
1376{
1377 return (priv->contexts[ctxid].active.filter_flags &
1378 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1379}
1380
1381static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
1382{
1383 return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
1384}
1385
1386static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
1387{
1388 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1389}
1390
1391static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
1392{
1393 if (ch_info == NULL)
1394 return 0;
1395 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1396}
1397
1398static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
1399{
1400 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1401}
1402
1403static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
1404{
1405 return ch_info->band == IEEE80211_BAND_5GHZ;
1406}
1407
1408static inline int
1409iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1410{
1411 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1412}
1413
1414static inline void
1415__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
1416{
1417 __free_pages(page, priv->hw_params.rx_page_order);
1418 priv->alloc_rxb_page--;
1419}
1420
1421static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
1422{
1423 free_pages(page, priv->hw_params.rx_page_order);
1424 priv->alloc_rxb_page--;
1425}
1426#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644
index 000000000000..080b852b33bd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
@@ -0,0 +1,45 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28
29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__
31#include "iwl-dev.h"
32
33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h"
35
36EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event);
44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event);
45#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
new file mode 100644
index 000000000000..9612aa0f6ec4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
@@ -0,0 +1,270 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_LEGACY_DEVICE_TRACE
29
30#include <linux/tracepoint.h>
31
32#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
33#undef TRACE_EVENT
34#define TRACE_EVENT(name, proto, ...) \
35static inline void trace_ ## name(proto) {}
36#endif
37
38
39#define PRIV_ENTRY __field(struct iwl_priv *, priv)
40#define PRIV_ASSIGN (__entry->priv = priv)
41
42#undef TRACE_SYSTEM
43#define TRACE_SYSTEM iwlwifi_legacy_io
44
45TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
46 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
47 TP_ARGS(priv, offs, val),
48 TP_STRUCT__entry(
49 PRIV_ENTRY
50 __field(u32, offs)
51 __field(u32, val)
52 ),
53 TP_fast_assign(
54 PRIV_ASSIGN;
55 __entry->offs = offs;
56 __entry->val = val;
57 ),
58 TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
59 __entry->offs, __entry->val)
60);
61
62TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
63 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
64 TP_ARGS(priv, offs, val),
65 TP_STRUCT__entry(
66 PRIV_ENTRY
67 __field(u32, offs)
68 __field(u8, val)
69 ),
70 TP_fast_assign(
71 PRIV_ASSIGN;
72 __entry->offs = offs;
73 __entry->val = val;
74 ),
75	TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
76 __entry->offs, __entry->val)
77);
78
79TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
80 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
81 TP_ARGS(priv, offs, val),
82 TP_STRUCT__entry(
83 PRIV_ENTRY
84 __field(u32, offs)
85 __field(u32, val)
86 ),
87 TP_fast_assign(
88 PRIV_ASSIGN;
89 __entry->offs = offs;
90 __entry->val = val;
91 ),
92	TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
93 __entry->offs, __entry->val)
94);
95
96#undef TRACE_SYSTEM
97#define TRACE_SYSTEM iwlwifi_legacy_ucode
98
99TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event,
100 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
101 TP_ARGS(priv, time, data, ev),
102 TP_STRUCT__entry(
103 PRIV_ENTRY
104
105 __field(u32, time)
106 __field(u32, data)
107 __field(u32, ev)
108 ),
109 TP_fast_assign(
110 PRIV_ASSIGN;
111 __entry->time = time;
112 __entry->data = data;
113 __entry->ev = ev;
114 ),
115 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
116 __entry->priv, __entry->time, __entry->data, __entry->ev)
117);
118
119TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event,
120 TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
121 TP_ARGS(priv, wraps, n_entry, p_entry),
122 TP_STRUCT__entry(
123 PRIV_ENTRY
124
125 __field(u32, wraps)
126 __field(u32, n_entry)
127 __field(u32, p_entry)
128 ),
129 TP_fast_assign(
130 PRIV_ASSIGN;
131 __entry->wraps = wraps;
132 __entry->n_entry = n_entry;
133 __entry->p_entry = p_entry;
134 ),
135 TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
136 __entry->priv, __entry->wraps, __entry->n_entry,
137 __entry->p_entry)
138);
139
140#undef TRACE_SYSTEM
141#define TRACE_SYSTEM iwlwifi
142
143TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
144 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
145 TP_ARGS(priv, hcmd, len, flags),
146 TP_STRUCT__entry(
147 PRIV_ENTRY
148 __dynamic_array(u8, hcmd, len)
149 __field(u32, flags)
150 ),
151 TP_fast_assign(
152 PRIV_ASSIGN;
153 memcpy(__get_dynamic_array(hcmd), hcmd, len);
154 __entry->flags = flags;
155 ),
156 TP_printk("[%p] hcmd %#.2x (%ssync)",
157 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
158 __entry->flags & CMD_ASYNC ? "a" : "")
159);
160
161TRACE_EVENT(iwlwifi_legacy_dev_rx,
162 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
163 TP_ARGS(priv, rxbuf, len),
164 TP_STRUCT__entry(
165 PRIV_ENTRY
166 __dynamic_array(u8, rxbuf, len)
167 ),
168 TP_fast_assign(
169 PRIV_ASSIGN;
170 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
171 ),
172 TP_printk("[%p] RX cmd %#.2x",
173 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
174);
175
176TRACE_EVENT(iwlwifi_legacy_dev_tx,
177 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
178 void *buf0, size_t buf0_len,
179 void *buf1, size_t buf1_len),
180 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
181 TP_STRUCT__entry(
182 PRIV_ENTRY
183
184 __field(size_t, framelen)
185 __dynamic_array(u8, tfd, tfdlen)
186
187 /*
188 * Do not insert between or below these items,
189 * we want to keep the frame together (except
190 * for the possible padding).
191 */
192 __dynamic_array(u8, buf0, buf0_len)
193 __dynamic_array(u8, buf1, buf1_len)
194 ),
195 TP_fast_assign(
196 PRIV_ASSIGN;
197 __entry->framelen = buf0_len + buf1_len;
198 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
199 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
200 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
201 ),
202 TP_printk("[%p] TX %.2x (%zu bytes)",
203 __entry->priv,
204 ((u8 *)__get_dynamic_array(buf0))[0],
205 __entry->framelen)
206);
207
208TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
209 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
210 u32 data1, u32 data2, u32 line, u32 blink1,
211 u32 blink2, u32 ilink1, u32 ilink2),
212 TP_ARGS(priv, desc, time, data1, data2, line,
213 blink1, blink2, ilink1, ilink2),
214 TP_STRUCT__entry(
215 PRIV_ENTRY
216 __field(u32, desc)
217 __field(u32, time)
218 __field(u32, data1)
219 __field(u32, data2)
220 __field(u32, line)
221 __field(u32, blink1)
222 __field(u32, blink2)
223 __field(u32, ilink1)
224 __field(u32, ilink2)
225 ),
226 TP_fast_assign(
227 PRIV_ASSIGN;
228 __entry->desc = desc;
229 __entry->time = time;
230 __entry->data1 = data1;
231 __entry->data2 = data2;
232 __entry->line = line;
233 __entry->blink1 = blink1;
234 __entry->blink2 = blink2;
235 __entry->ilink1 = ilink1;
236 __entry->ilink2 = ilink2;
237 ),
238 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
239 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
240 __entry->priv, __entry->desc, __entry->time, __entry->data1,
241 __entry->data2, __entry->line, __entry->blink1,
242 __entry->blink2, __entry->ilink1, __entry->ilink2)
243);
244
245TRACE_EVENT(iwlwifi_legacy_dev_ucode_event,
246 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
247 TP_ARGS(priv, time, data, ev),
248 TP_STRUCT__entry(
249 PRIV_ENTRY
250
251 __field(u32, time)
252 __field(u32, data)
253 __field(u32, ev)
254 ),
255 TP_fast_assign(
256 PRIV_ASSIGN;
257 __entry->time = time;
258 __entry->data = data;
259 __entry->ev = ev;
260 ),
261 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
262 __entry->priv, __entry->time, __entry->data, __entry->ev)
263);
264#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
265
266#undef TRACE_INCLUDE_PATH
267#define TRACE_INCLUDE_PATH .
268#undef TRACE_INCLUDE_FILE
269#define TRACE_INCLUDE_FILE iwl-devtrace
270#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
new file mode 100644
index 000000000000..04c5648027df
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -0,0 +1,561 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwlegacy_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/* 2.4 GHz */
110const u8 iwlegacy_eeprom_band_1[14] = {
111 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
112};
113
114/* 5.2 GHz bands */
115static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
116 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
117};
118
119static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
120 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
121};
122
123static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
124 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
125};
126
127static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
128 145, 149, 153, 157, 161, 165
129};
130
131static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
132 1, 2, 3, 4, 5, 6, 7
133};
134
135static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
146{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
148 int ret = 0;
149
150 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
151 switch (gp) {
152 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
153 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
154 break;
155 default:
156		IWL_ERR(priv, "bad EEPROM signature, "
157 "EEPROM_GP=0x%08x\n", gp);
158 ret = -ENOENT;
159 break;
160 }
161 return ret;
162}
163
164const u8
165*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
166{
167 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
168 return &priv->eeprom[offset];
169}
170EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
171
172u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
173{
174 if (!priv->eeprom)
175 return 0;
176 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
177}
178EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
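
A standalone sketch of the 16-bit little-endian read that iwl_legacy_eeprom_query16() performs on the byte-oriented EEPROM image (illustrative only; in the driver the image is filled in by iwl_legacy_eeprom_init() below):

	#include <stdint.h>
	#include <stdio.h>

	/* the EEPROM image is stored in the card's little-endian byte order */
	static uint16_t query16(const uint8_t *eeprom, unsigned int offset)
	{
		return (uint16_t)eeprom[offset] |
		       (uint16_t)(eeprom[offset + 1] << 8);
	}

	int main(void)
	{
		const uint8_t image[] = { 0x34, 0x12, 0x78, 0x56 };

		/* prints 0x1234 0x5678 */
		printf("0x%04x 0x%04x\n",
		       (unsigned int)query16(image, 0),
		       (unsigned int)query16(image, 2));
		return 0;
	}
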
179
180/**
181 * iwl_legacy_eeprom_init - read EEPROM contents
182 *
183 * Load the EEPROM contents from adapter into priv->eeprom
184 *
185 * NOTE: This routine uses the non-debug IO access functions.
186 */
187int iwl_legacy_eeprom_init(struct iwl_priv *priv)
188{
189 __le16 *e;
190 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
191 int sz;
192 int ret;
193 u16 addr;
194
195 /* allocate eeprom */
196 sz = priv->cfg->base_params->eeprom_size;
197 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
198 priv->eeprom = kzalloc(sz, GFP_KERNEL);
199 if (!priv->eeprom) {
200 ret = -ENOMEM;
201 goto alloc_err;
202 }
203 e = (__le16 *)priv->eeprom;
204
205 priv->cfg->ops->lib->apm_ops.init(priv);
206
207 ret = iwl_legacy_eeprom_verify_signature(priv);
208 if (ret < 0) {
209 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
210 ret = -ENOENT;
211 goto err;
212 }
213
214 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
215 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
216 if (ret < 0) {
217 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
218 ret = -ENOENT;
219 goto err;
220 }
221
222 /* eeprom is an array of 16bit values */
223 for (addr = 0; addr < sz; addr += sizeof(u16)) {
224 u32 r;
225
226 _iwl_legacy_write32(priv, CSR_EEPROM_REG,
227 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
228
229 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
230 CSR_EEPROM_REG_READ_VALID_MSK,
231 CSR_EEPROM_REG_READ_VALID_MSK,
232 IWL_EEPROM_ACCESS_TIMEOUT);
233 if (ret < 0) {
234 IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
235 addr);
236 goto done;
237 }
238 r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
239 e[addr / 2] = cpu_to_le16(r >> 16);
240 }
241
242 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
243 "EEPROM",
244 iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
245
246 ret = 0;
247done:
248 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
249
250err:
251 if (ret)
252 iwl_legacy_eeprom_free(priv);
253 /* Reset chip to save power until we load uCode during "up". */
254 iwl_legacy_apm_stop(priv);
255alloc_err:
256 return ret;
257}
258EXPORT_SYMBOL(iwl_legacy_eeprom_init);
259
260void iwl_legacy_eeprom_free(struct iwl_priv *priv)
261{
262 kfree(priv->eeprom);
263 priv->eeprom = NULL;
264}
265EXPORT_SYMBOL(iwl_legacy_eeprom_free);
266
267static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
268 int eep_band, int *eeprom_ch_count,
269 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const u8 **eeprom_ch_index)
271{
272 u32 offset = priv->cfg->ops->lib->
273 eeprom_ops.regulatory_bands[eep_band - 1];
274 switch (eep_band) {
275 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
277 *eeprom_ch_info = (struct iwl_eeprom_channel *)
278 iwl_legacy_eeprom_query_addr(priv, offset);
279 *eeprom_ch_index = iwlegacy_eeprom_band_1;
280 break;
281 case 2: /* 4.9GHz band */
282 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
283 *eeprom_ch_info = (struct iwl_eeprom_channel *)
284 iwl_legacy_eeprom_query_addr(priv, offset);
285 *eeprom_ch_index = iwlegacy_eeprom_band_2;
286 break;
287 case 3: /* 5.2GHz band */
288 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
289 *eeprom_ch_info = (struct iwl_eeprom_channel *)
290 iwl_legacy_eeprom_query_addr(priv, offset);
291 *eeprom_ch_index = iwlegacy_eeprom_band_3;
292 break;
293 case 4: /* 5.5GHz band */
294 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
295 *eeprom_ch_info = (struct iwl_eeprom_channel *)
296 iwl_legacy_eeprom_query_addr(priv, offset);
297 *eeprom_ch_index = iwlegacy_eeprom_band_4;
298 break;
299 case 5: /* 5.7GHz band */
300 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
301 *eeprom_ch_info = (struct iwl_eeprom_channel *)
302 iwl_legacy_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwlegacy_eeprom_band_5;
304 break;
305 case 6: /* 2.4GHz ht40 channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
307 *eeprom_ch_info = (struct iwl_eeprom_channel *)
308 iwl_legacy_eeprom_query_addr(priv, offset);
309 *eeprom_ch_index = iwlegacy_eeprom_band_6;
310 break;
311 case 7: /* 5 GHz ht40 channels */
312 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
313 *eeprom_ch_info = (struct iwl_eeprom_channel *)
314 iwl_legacy_eeprom_query_addr(priv, offset);
315 *eeprom_ch_index = iwlegacy_eeprom_band_7;
316 break;
317 default:
318 BUG();
319 return;
320 }
321}
322
323#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
324 ? # x " " : "")
325/**
326 * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
327 *
328 * Does not set up a command, or touch hardware.
329 */
330static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
331 enum ieee80211_band band, u16 channel,
332 const struct iwl_eeprom_channel *eeprom_ch,
333 u8 clear_ht40_extension_channel)
334{
335 struct iwl_channel_info *ch_info;
336
337 ch_info = (struct iwl_channel_info *)
338 iwl_legacy_get_channel_info(priv, band, channel);
339
340 if (!iwl_legacy_is_channel_valid(ch_info))
341 return -1;
342
343 IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
344 " Ad-Hoc %ssupported\n",
345 ch_info->channel,
346 iwl_legacy_is_channel_a_band(ch_info) ?
347 "5.2" : "2.4",
348 CHECK_AND_PRINT(IBSS),
349 CHECK_AND_PRINT(ACTIVE),
350 CHECK_AND_PRINT(RADAR),
351 CHECK_AND_PRINT(WIDE),
352 CHECK_AND_PRINT(DFS),
353 eeprom_ch->flags,
354 eeprom_ch->max_power_avg,
355 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
356 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
357 "" : "not ");
358
359 ch_info->ht40_eeprom = *eeprom_ch;
360 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
361 ch_info->ht40_flags = eeprom_ch->flags;
362 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
363 ch_info->ht40_extension_channel &=
364 ~clear_ht40_extension_channel;
365
366 return 0;
367}
368
369#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
370 ? # x " " : "")
371
372/**
373 * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
374 */
375int iwl_legacy_init_channel_map(struct iwl_priv *priv)
376{
377 int eeprom_ch_count = 0;
378 const u8 *eeprom_ch_index = NULL;
379 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
380 int band, ch;
381 struct iwl_channel_info *ch_info;
382
383 if (priv->channel_count) {
384 IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
385 return 0;
386 }
387
388 IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
389
390 priv->channel_count =
391 ARRAY_SIZE(iwlegacy_eeprom_band_1) +
392 ARRAY_SIZE(iwlegacy_eeprom_band_2) +
393 ARRAY_SIZE(iwlegacy_eeprom_band_3) +
394 ARRAY_SIZE(iwlegacy_eeprom_band_4) +
395 ARRAY_SIZE(iwlegacy_eeprom_band_5);
396
397 IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
398 priv->channel_count);
399
400 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
401 priv->channel_count, GFP_KERNEL);
402 if (!priv->channel_info) {
403 IWL_ERR(priv, "Could not allocate channel_info\n");
404 priv->channel_count = 0;
405 return -ENOMEM;
406 }
407
408 ch_info = priv->channel_info;
409
410	/* Loop through the 5 EEPROM bands, adding them in order to the
411	 * channel map we maintain (which contains more information than
412	 * just what is in the EEPROM). */
413 for (band = 1; band <= 5; band++) {
414
415 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
416 &eeprom_ch_info, &eeprom_ch_index);
417
418 /* Loop through each band adding each of the channels */
419 for (ch = 0; ch < eeprom_ch_count; ch++) {
420 ch_info->channel = eeprom_ch_index[ch];
421 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
422 IEEE80211_BAND_5GHZ;
423
424 /* permanently store EEPROM's channel regulatory flags
425 * and max power in channel info database. */
426 ch_info->eeprom = eeprom_ch_info[ch];
427
428 /* Copy the run-time flags so they are there even on
429 * invalid channels */
430 ch_info->flags = eeprom_ch_info[ch].flags;
431 /* First write that ht40 is not enabled, and then enable
432 * one by one */
433 ch_info->ht40_extension_channel =
434 IEEE80211_CHAN_NO_HT40;
435
436 if (!(iwl_legacy_is_channel_valid(ch_info))) {
437 IWL_DEBUG_EEPROM(priv,
438 "Ch. %d Flags %x [%sGHz] - "
439 "No traffic\n",
440 ch_info->channel,
441 ch_info->flags,
442 iwl_legacy_is_channel_a_band(ch_info) ?
443 "5.2" : "2.4");
444 ch_info++;
445 continue;
446 }
447
448 /* Initialize regulatory-based run-time data */
449 ch_info->max_power_avg = ch_info->curr_txpow =
450 eeprom_ch_info[ch].max_power_avg;
451 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
452 ch_info->min_power = 0;
453
454 IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
455 "%s%s%s%s%s%s(0x%02x %ddBm):"
456 " Ad-Hoc %ssupported\n",
457 ch_info->channel,
458 iwl_legacy_is_channel_a_band(ch_info) ?
459 "5.2" : "2.4",
460 CHECK_AND_PRINT_I(VALID),
461 CHECK_AND_PRINT_I(IBSS),
462 CHECK_AND_PRINT_I(ACTIVE),
463 CHECK_AND_PRINT_I(RADAR),
464 CHECK_AND_PRINT_I(WIDE),
465 CHECK_AND_PRINT_I(DFS),
466 eeprom_ch_info[ch].flags,
467 eeprom_ch_info[ch].max_power_avg,
468 ((eeprom_ch_info[ch].
469 flags & EEPROM_CHANNEL_IBSS)
470 && !(eeprom_ch_info[ch].
471 flags & EEPROM_CHANNEL_RADAR))
472 ? "" : "not ");
473
474 /* Set the tx_power_user_lmt to the highest power
475 * supported by any channel */
476 if (eeprom_ch_info[ch].max_power_avg >
477 priv->tx_power_user_lmt)
478 priv->tx_power_user_lmt =
479 eeprom_ch_info[ch].max_power_avg;
480
481 ch_info++;
482 }
483 }
484
485 /* Check if we do have HT40 channels */
486 if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
487 EEPROM_REGULATORY_BAND_NO_HT40 &&
488 priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
489 EEPROM_REGULATORY_BAND_NO_HT40)
490 return 0;
491
492 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
493 for (band = 6; band <= 7; band++) {
494 enum ieee80211_band ieeeband;
495
496 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
497 &eeprom_ch_info, &eeprom_ch_index);
498
499 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
500 ieeeband =
501 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
502
503 /* Loop through each band adding each of the channels */
504 for (ch = 0; ch < eeprom_ch_count; ch++) {
505 /* Set up driver's info for lower half */
506 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
507 eeprom_ch_index[ch],
508 &eeprom_ch_info[ch],
509 IEEE80211_CHAN_NO_HT40PLUS);
510
511 /* Set up driver's info for upper half */
512 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
513 eeprom_ch_index[ch] + 4,
514 &eeprom_ch_info[ch],
515 IEEE80211_CHAN_NO_HT40MINUS);
516 }
517 }
518
519 return 0;
520}
521EXPORT_SYMBOL(iwl_legacy_init_channel_map);
522
523/*
524 * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
525 */
526void iwl_legacy_free_channel_map(struct iwl_priv *priv)
527{
528 kfree(priv->channel_info);
529 priv->channel_count = 0;
530}
531EXPORT_SYMBOL(iwl_legacy_free_channel_map);
532
533/**
534 * iwl_legacy_get_channel_info - Find driver's private channel info
535 *
536 * Based on band and channel number.
537 */
538const struct
539iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
540 enum ieee80211_band band, u16 channel)
541{
542 int i;
543
544 switch (band) {
545 case IEEE80211_BAND_5GHZ:
546 for (i = 14; i < priv->channel_count; i++) {
547 if (priv->channel_info[i].channel == channel)
548 return &priv->channel_info[i];
549 }
550 break;
551 case IEEE80211_BAND_2GHZ:
552 if (channel >= 1 && channel <= 14)
553 return &priv->channel_info[channel - 1];
554 break;
555 default:
556 BUG();
557 }
558
559 return NULL;
560}
561EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
new file mode 100644
index 000000000000..c59c81002022
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
@@ -0,0 +1,344 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_eeprom_h__
64#define __iwl_legacy_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
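
/*
 * Editorial sketch (not part of the original file): one polled 16-bit
 * EEPROM read as described in the access-time comment above.  It assumes
 * the iwl_write32()/iwl_read32() helpers from iwl-io.h and the
 * CSR_EEPROM_REG / CSR_EEPROM_REG_READ_VALID_MSK definitions from
 * iwl-csr.h; the real driver additionally takes the EEPROM semaphore
 * first.  The helper name itself is illustrative only.
 */
static inline int iwl_legacy_eeprom_read16_sketch(struct iwl_priv *priv,
						  u16 addr, u16 *val)
{
	int t;
	u32 r;

	/* initiate the read: byte address << 1 into CSR_EEPROM_REG */
	iwl_write32(priv, CSR_EEPROM_REG, (u32)addr << 1);

	/* poll every 10 uSec, up to IWL_EEPROM_ACCESS_TIMEOUT (5000 uSec) */
	for (t = 0; t < IWL_EEPROM_ACCESS_TIMEOUT; t += 10) {
		r = iwl_read32(priv, CSR_EEPROM_REG);
		if (r & CSR_EEPROM_REG_READ_VALID_MSK) {
			*val = (u16)(r >> 16);	/* data is in bits 31-16 */
			return 0;
		}
		udelay(10);
	}

	return -ETIMEDOUT;
}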
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
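
/*
 * Editorial sketch: the "(VALID && IBSS && ACTIVE && !RADAR)" rule from
 * the comment above, expressed as a test on struct iwl_eeprom_channel
 * flags.  The helper name is illustrative, not part of the original
 * header.
 */
static inline bool iwl_legacy_is_ibss_allowed_sketch(u8 flags)
{
	return (flags & EEPROM_CHANNEL_VALID) &&
	       (flags & EEPROM_CHANNEL_IBSS) &&
	       (flags & EEPROM_CHANNEL_ACTIVE) &&
	       !(flags & EEPROM_CHANNEL_RADAR);
}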
111
112/* SKU Capabilities */
113/* 3945 only */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __packed;
123
124/* 3945 Specific */
125#define EEPROM_3945_EEPROM_VERSION (0x2f)
126
127/* 4965 has two radio transmitters (and 3 radio receivers) */
128#define EEPROM_TX_POWER_TX_CHAINS (2)
129
130/* 4965 has room for up to 8 sets of txpower calibration data */
131#define EEPROM_TX_POWER_BANDS (8)
132
133/* 4965 factory calibration measures txpower gain settings for
134 * each of 3 target output levels */
135#define EEPROM_TX_POWER_MEASUREMENTS (3)
136
137/* 4965 Specific */
138/* 4965 driver does not work with txpower calibration version < 5 */
139#define EEPROM_4965_TX_POWER_VERSION (5)
140#define EEPROM_4965_EEPROM_VERSION (0x2f)
141#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
142#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
143#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
144#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
145
146/* 2.4 GHz */
147extern const u8 iwlegacy_eeprom_band_1[14];
148
149/*
150 * factory calibration data for one txpower level, on one channel,
151 * measured on one of the 2 tx chains (radio transmitter and associated
152 * antenna). EEPROM contains:
153 *
154 * 1) Temperature (degrees Celsius) of device when measurement was made.
155 *
156 * 2) Gain table index used to achieve the target measurement power.
157 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
158 *
159 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
160 *
161 * 4) RF power amplifier detector level measurement (not used).
162 */
163struct iwl_eeprom_calib_measure {
164 u8 temperature; /* Device temperature (Celsius) */
165 u8 gain_idx; /* Index into gain table */
166 u8 actual_pow; /* Measured RF output power, half-dBm */
167 s8 pa_det; /* Power amp detector level (not used) */
168} __packed;
169
170
171/*
172 * measurement set for one channel. EEPROM contains:
173 *
174 * 1) Channel number measured
175 *
176 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
177 * (a.k.a. "tx chains") (6 measurements altogether)
178 */
179struct iwl_eeprom_calib_ch_info {
180 u8 ch_num;
181 struct iwl_eeprom_calib_measure
182 measurements[EEPROM_TX_POWER_TX_CHAINS]
183 [EEPROM_TX_POWER_MEASUREMENTS];
184} __packed;
185
186/*
187 * txpower subband info.
188 *
189 * For each frequency subband, EEPROM contains the following:
190 *
191 * 1) First and last channels within range of the subband. "0" values
192 * indicate that this sample set is not being used.
193 *
194 * 2) Sample measurement sets for 2 channels close to the range endpoints.
195 */
196struct iwl_eeprom_calib_subband_info {
197 u8 ch_from; /* channel number of lowest channel in subband */
198 u8 ch_to; /* channel number of highest channel in subband */
199 struct iwl_eeprom_calib_ch_info ch1;
200 struct iwl_eeprom_calib_ch_info ch2;
201} __packed;
202
203
204/*
205 * txpower calibration info. EEPROM contains:
206 *
207 * 1) Factory-measured saturation power levels (maximum levels at which
208 * tx power amplifier can output a signal without too much distortion).
209 * There is one level for 2.4 GHz band and one for 5 GHz band. These
210 * values apply to all channels within each of the bands.
211 *
212 * 2) Factory-measured power supply voltage level. This is assumed to be
213 * constant (i.e. same value applies to all channels/bands) while the
214 * factory measurements are being made.
215 *
216 * 3) Up to 8 sets of factory-measured txpower calibration values.
217 * These are for different frequency ranges, since txpower gain
218 * characteristics of the analog radio circuitry vary with frequency.
219 *
220 * Not all sets need to be filled with data;
221 * struct iwl_eeprom_calib_subband_info contains range of channels
222 * (0 if unused) for each set of data.
223 */
224struct iwl_eeprom_calib_info {
225 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
226 u8 saturation_power52; /* half-dBm */
227 __le16 voltage; /* signed */
228 struct iwl_eeprom_calib_subband_info
229 band_info[EEPROM_TX_POWER_BANDS];
230} __packed;
231
232
233/* General */
234#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
235#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
236#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
237#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
238#define EEPROM_VERSION (2*0x44) /* 2 bytes */
239#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
240#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
241#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
242#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
243#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
244
245/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
246#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
247#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
248#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
249#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
250#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
251#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
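
/*
 * Editorial sketch: decoding a raw EEPROM_RADIO_CONFIG word with the
 * masks above.  iwl_legacy_eeprom_query16() is declared at the end of
 * this header; the helper name here is illustrative only.
 */
static inline void iwl_legacy_radio_cfg_sketch(struct iwl_priv *priv)
{
	u16 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
	u8 rf_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);		/* bits 0-1 */
	u8 tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);	/* bits 8-11 */
	u8 rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);	/* bits 12-15 */

	IWL_DEBUG_EEPROM(priv, "radio cfg: type %u tx 0x%x rx 0x%x\n",
			 rf_type, tx_ant, rx_ant);
}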
252
253#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
254#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
255
256/*
257 * Per-channel regulatory data.
258 *
259 * Each channel that *might* be supported by iwl has a fixed location
260 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
261 * txpower (MSB).
262 *
263 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
264 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
265 *
266 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
267 */
268#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
269#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
270#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
271
272/*
273 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
274 * 5.0 GHz channels 7, 8, 11, 12, 16
275 * (4915-5080MHz) (none of these is ever supported)
276 */
277#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
278#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
279
280/*
281 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
282 * (5170-5320MHz)
283 */
284#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
285#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
286
287/*
288 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
289 * (5500-5700MHz)
290 */
291#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
292#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
293
294/*
295 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
296 * (5725-5825MHz)
297 */
298#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
299#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
300
301/*
302 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
303 *
304 * The channel listed is the center of the lower 20 MHz half of the channel.
305 * The overall center frequency is actually 2 channels (10 MHz) above that,
306 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
307 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
308 * and the overall HT40 channel width centers on channel 3.
309 *
310 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
311 * control channel to which to tune. RXON also specifies whether the
312 * control channel is the upper or lower half of a HT40 channel.
313 *
314 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
315 */
316#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
317
318/*
319 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
320 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
321 */
322#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
323
324#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
325
326struct iwl_eeprom_ops {
327 const u32 regulatory_bands[7];
328 int (*acquire_semaphore) (struct iwl_priv *priv);
329 void (*release_semaphore) (struct iwl_priv *priv);
330};
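
/*
 * Editorial example: how a device configuration might populate
 * iwl_eeprom_ops.  The regulatory_bands[] slots are the
 * EEPROM_REGULATORY_BAND_* offsets above; a device without HT40 support
 * would put EEPROM_REGULATORY_BAND_NO_HT40 in the last two slots, which
 * is exactly what iwl_legacy_init_channel_map() checks before parsing
 * bands 6 and 7.  The semaphore callback names below are hypothetical.
 */
#if 0	/* illustration only */
static const struct iwl_eeprom_ops iwl_example_eeprom_ops = {
	.regulatory_bands = {
		EEPROM_REGULATORY_BAND_1_CHANNELS,
		EEPROM_REGULATORY_BAND_2_CHANNELS,
		EEPROM_REGULATORY_BAND_3_CHANNELS,
		EEPROM_REGULATORY_BAND_4_CHANNELS,
		EEPROM_REGULATORY_BAND_5_CHANNELS,
		EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
		EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
	},
	.acquire_semaphore = iwl_example_eeprom_acquire_semaphore,
	.release_semaphore = iwl_example_eeprom_release_semaphore,
};
#endif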
331
332
333int iwl_legacy_eeprom_init(struct iwl_priv *priv);
334void iwl_legacy_eeprom_free(struct iwl_priv *priv);
335const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
336 size_t offset);
337u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
338int iwl_legacy_init_channel_map(struct iwl_priv *priv);
339void iwl_legacy_free_channel_map(struct iwl_priv *priv);
340const struct iwl_channel_info *iwl_legacy_get_channel_info(
341 const struct iwl_priv *priv,
342 enum ieee80211_band band, u16 channel);
343
344#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
new file mode 100644
index 000000000000..4e20c7e5c883
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-fh.h
@@ -0,0 +1,513 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_fh_h__
64#define __iwl_legacy_fh_h__
65
66/****************************/
67/* Flow Handler Definitions */
68/****************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH_MEM_LOWER_BOUND (0x1000)
75#define FH_MEM_UPPER_BOUND (0x2000)
76
77/**
78 * Keep-Warm (KW) buffer base address.
79 *
80 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
82 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
83 * from going into a power-savings mode that would cause higher DRAM latency,
84 * and possible data over/under-runs, before all Tx/Rx is complete.
85 *
86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
87 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
88 * automatically invokes keep-warm accesses when normal accesses might not
89 * be sufficient to maintain fast DRAM response.
90 *
91 * Bit fields:
92 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
93 */
94#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
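
/*
 * Editorial sketch: programming the keep-warm buffer described above.
 * It assumes a 4 KByte, 4K-aligned DMA buffer already allocated by the
 * driver and the iwl_legacy_write_direct32() helper (assumed from
 * iwl-io.h); bits [35:4] of the physical address go into the register.
 */
static inline void iwl_legacy_kw_init_sketch(struct iwl_priv *priv,
					     dma_addr_t kw_dma)
{
	iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG,
				  (u32)(kw_dma >> 4));
}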
95
96
97/**
98 * TFD Circular Buffers Base (CBBC) addresses
99 *
100 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
104 * aligned (address bits 0-7 must be 0).
105 *
106 * Bit fields in each pointer register:
107 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
108 */
109#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
110#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
111
112/* Find TFD CB base pointer for given queue (range 0-15). */
113#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
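
/*
 * Editorial sketch: pointing one Tx queue at its 256-byte-aligned TFD
 * circular buffer, per the CBBC description above (bits [35:8] of the
 * physical address).  iwl_legacy_write_direct32() is assumed from
 * iwl-io.h; the helper name is illustrative.
 */
static inline void iwl_legacy_cbbc_init_sketch(struct iwl_priv *priv,
					       int txq_id, dma_addr_t tfd_dma)
{
	iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
				  (u32)(tfd_dma >> 8));
}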
114
115
116/**
117 * Rx SRAM Control and Status Registers (RSCSR)
118 *
119 * These registers provide handshake between driver and 4965 for the Rx queue
120 * (this queue handles *all* command responses, notifications, Rx data, etc.
121 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
125 * mapping between RBDs and RBs.
126 *
127 * Driver must allocate host DRAM memory for the following, and set the
128 * physical address of each into 4965 registers:
129 *
130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
131 * entries (although any power of 2, up to 4096, is selectable by driver).
132 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
133 * (typically 4K, although 8K or 16K are also selectable by driver).
134 * Driver sets up RB size and number of RBDs in the CB via Rx config
135 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
136 *
137 * Bit fields within one RBD:
138 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
139 *
140 * Driver sets physical address [35:8] of base of RBD circular buffer
141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
142 *
143 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
144 * (RBs) have been filled, via a "write pointer", actually the index of
145 * the RB's corresponding RBD within the circular buffer. Driver sets
146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
147 *
148 * Bit fields in lower dword of Rx status buffer (upper dword not used
149 * by driver; see struct iwl4965_shared, val0):
150 * 31-12: Not used by driver
151 * 11- 0: Index of last filled Rx buffer descriptor
152 * (4965 writes, driver reads this value)
153 *
154 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
155 * enter pointers to these RBs into contiguous RBD circular buffer entries,
156 * and update the 4965's "write" index register,
157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
158 *
159 * This "write" index corresponds to the *next* RBD that the driver will make
160 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
161 * the circular buffer. This value should initially be 0 (before preparing any
162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
163 * wrap back to 0 at the end of the circular buffer (but don't wrap before
164 * "read" index has advanced past 1! See below).
165 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
166 *
167 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
169 * to tell the driver the index of the latest filled RBD. The driver must
170 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
171 *
172 * The driver must also internally keep track of a third index, which is the
173 * next RBD to process. When receiving an Rx interrupt, driver should process
174 * all filled but unprocessed RBs up to, but not including, the RB
175 * corresponding to the "read" index. For example, if "read" index becomes "1",
176 * driver may process the RB pointed to by RBD 0. Depending on volume of
177 * traffic, there may be many RBs to process.
178 *
179 * If read index == write index, 4965 thinks there is no room to put new data.
180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
181 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
182 * and "read" indexes; that is, make sure that there are no more than 254
183 * buffers waiting to be filled.
184 */
185#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
186#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
187#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
188
189/**
190 * Physical base address of 8-byte Rx Status buffer.
191 * Bit fields:
192 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
193 */
194#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
195
196/**
197 * Physical base address of Rx Buffer Descriptor Circular Buffer.
198 * Bit fields:
199 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
200 */
201#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
202
203/**
204 * Rx write pointer (index, really!).
205 * Bit fields:
206 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
207 * NOTE: For 256-entry circular buffer, use only bits [7:0].
208 */
209#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
210#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
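
/*
 * Editorial sketch of the "write" index update described above: after
 * queueing new RBDs, the driver writes the index (which the hardware
 * expects in multiples of 8, hence the masking) to the channel 0 write
 * pointer register.  The helper name is illustrative; the real restock
 * path also handles wrapping of the queue's write index.
 */
static inline void iwl_legacy_rx_wptr_update_sketch(struct iwl_priv *priv,
						    u32 write_idx)
{
	iwl_write32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, write_idx & ~0x7);
}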
211
212
213/**
214 * Rx Config/Status Registers (RCSR)
215 * Rx Config Reg for channel 0 (only channel used)
216 *
217 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
218 * normal operation (see bit fields).
219 *
220 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
221 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
222 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
223 *
224 * Bit fields:
225 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
226 * '10' operate normally
227 * 29-24: reserved
228 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
229 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
230 * 19-18: reserved
231 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
232 * '10' 12K, '11' 16K.
233 * 15-14: reserved
234 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
235 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
236 * typical value 0x10 (about 1/2 msec)
237 * 3- 0: reserved
238 */
239#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
240#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
241#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
242
243#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
244
245#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
246#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
247#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
248#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
249#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
250#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
251
252#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
253#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
254#define RX_RB_TIMEOUT (0x10)
255
256#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
257#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
258#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
259
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
262#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
263#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
264
265#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
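
/*
 * Editorial sketch: composing FH_MEM_RCSR_CHNL0_CONFIG_REG for normal
 * operation from the fields documented above -- DMA enabled, interrupt
 * to host, 4K receive buffers, 2^8 = 256 RBDs, and RX_RB_TIMEOUT in the
 * interrupt timeout field.  The combination is illustrative, not
 * prescriptive.
 */
static inline u32 iwl_legacy_rx_config_sketch(void)
{
	return FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	       FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	       FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	       (8 << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |	/* 2^8 = 256 RBDs */
	       (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS);
}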
268
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
271/**
272 * Rx Shared Status Registers (RSSR)
273 *
274 * After stopping Rx DMA channel (writing 0 to
275 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
276 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
277 *
278 * Bit fields:
279 * 24: 1 = Channel 0 is idle
280 *
281 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
282 * contain default values that should not be altered by the driver.
283 */
284#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
285#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
286
287#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
288#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
289#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
290 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
291
292#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
293
294#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
295
296/* TFDB Area - TFDs buffer table */
297#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
298#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
299#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
300#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
301#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
302
303/**
304 * Transmit DMA Channel Control/Status Registers (TCSR)
305 *
306 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
309 *
310 * To use a Tx DMA channel, driver must initialize its
311 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
312 *
313 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
314 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
315 *
316 * All other bits should be 0.
317 *
318 * Bit fields:
319 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
320 * '10' operate normally
321 * 29- 4: Reserved, set to "0"
322 * 3: Enable internal DMA requests (1, normal operation), disable (0)
323 * 2- 0: Reserved, set to "0"
324 */
325#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
327
328/* Find Control/Status reg for given Tx DMA/FIFO channel */
329#define FH49_TCSR_CHNL_NUM (7)
330#define FH50_TCSR_CHNL_NUM (8)
331
332/* TCSR: tx_config register values */
333#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
334 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
335#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
336 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
337#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
338 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
339
340#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
341#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
342
343#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
344#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
345
346#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
347#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
348#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
349
350#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
351#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
352#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
353
354#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
355#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
356#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
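
/*
 * Editorial sketch: enabling one Tx DMA channel for normal operation as
 * prescribed in the TCSR comment above (all other bits left zero).  The
 * credit-enable constant is named ..._DMA_CREDIT_ENABLE in this header;
 * iwl_write32() is assumed from iwl-io.h and the helper name is
 * illustrative.
 */
static inline void iwl_legacy_tx_chnl_enable_sketch(struct iwl_priv *priv,
						    int chnl)
{
	iwl_write32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
}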
357
358#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
359#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
360#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
361
362#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
363#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
364
365/**
366 * Tx Shared Status Registers (TSSR)
367 *
368 * After stopping Tx DMA channel (writing 0 to
369 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
370 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
371 * (channel's buffers empty | no pending requests).
372 *
373 * Bit fields:
374 * 31-24: 1 = Channel buffers empty (channel 7:0)
375 * 23-16: 1 = No pending requests (channel 7:0)
376 */
377#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
378#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
379
380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
381
382/**
383 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
384 * 31: Indicates an address error when accessed to internal memory
385 * uCode/driver must write "1" in order to clear this flag
386 * 30: Indicates that Host did not send the expected number of dwords to FH
387 * uCode/driver must write "1" in order to clear this flag
388 * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
389 * command was received from the scheduler while the TRB was already full
390 * with previous command
391 * uCode/driver must write "1" in order to clear this flag
392 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
393 * bit is set, it indicates that the FH has received a full indication
394 * from the RTC TxFIFO and the current value of the TxCredit counter was
395 * not equal to zero. This means that the credit mechanism was not
396 * synchronized to the TxFIFO status
397 * uCode/driver must write "1" in order to clear this flag
398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400
401#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
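
/*
 * Editorial sketch: after clearing a channel's TX_CONFIG register, poll
 * FH_TSSR_TX_STATUS_REG for the idle bit as the TSSR comment above
 * requires.  iwl_poll_bit() is the helper from iwl-io.h; the 1000 uSec
 * timeout and the helper name are illustrative.
 */
static inline int iwl_legacy_tx_chnl_idle_sketch(struct iwl_priv *priv,
						 int chnl)
{
	return iwl_poll_bit(priv, FH_TSSR_TX_STATUS_REG,
			    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
			    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
}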
402
403/* Tx service channels */
404#define FH_SRVC_CHNL (9)
405#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
406#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
407#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
408 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
409
410#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
411/* Instruct FH to increment the retry count of a packet when
412 * it is brought from the memory to TX-FIFO
413 */
414#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
415
416#define RX_QUEUE_SIZE 256
417#define RX_QUEUE_MASK 255
418#define RX_QUEUE_SIZE_LOG 8
419
420/*
421 * RX related structures and functions
422 */
423#define RX_FREE_BUFFERS 64
424#define RX_LOW_WATERMARK 8
425
426/* Size of one Rx buffer in host DRAM */
427#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
428#define IWL_RX_BUF_SIZE_4K (4 * 1024)
429#define IWL_RX_BUF_SIZE_8K (8 * 1024)
430
431/**
432 * struct iwl_rb_status - receive buffer status
433 * host memory mapped FH registers
434 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
435 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
436 * @finished_rb_num [0:11] - Indicates the index of the current RB
437 * to which the last frame was written
438 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
439 * which was transferred
440 */
441struct iwl_rb_status {
442 __le16 closed_rb_num;
443 __le16 closed_fr_num;
444 __le16 finished_rb_num;
445	__le16 finished_fr_num;
446 __le32 __unused; /* 3945 only */
447} __packed;
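
/*
 * Editorial sketch: extracting the hardware "read" index from the Rx
 * status buffer above.  The fields are 12 bits wide ([0:11]), hence the
 * 0x0FFF mask; the helper name is illustrative.
 */
static inline u16 iwl_legacy_rb_closed_idx_sketch(struct iwl_rb_status *s)
{
	return le16_to_cpu(s->closed_rb_num) & 0x0FFF;
}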
448
449
450#define TFD_QUEUE_SIZE_MAX (256)
451#define TFD_QUEUE_SIZE_BC_DUP (64)
452#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
453#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
454#define IWL_NUM_OF_TBS 20
455
456static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
457{
458 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
459}
460/**
461 * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
462 *
463 * This structure contains the dma address and length of a transmit buffer
464 *
465 * @lo: low [31:0] portion of the dma address of TX buffer
466 * every second entry is aligned only to a 16 bit boundary
467 * @hi_n_len 0-3 [35:32] portion of dma
468 * 4-15 length of the tx buffer
469 */
470struct iwl_tfd_tb {
471 __le32 lo;
472 __le16 hi_n_len;
473} __packed;
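
/*
 * Editorial sketch: filling one TB entry, packing the upper address bits
 * [35:32] (via iwl_legacy_get_dma_hi_addr() above) and the 12-bit buffer
 * length into hi_n_len as documented in the struct comment.  The helper
 * name is illustrative.
 */
static inline void iwl_legacy_tfd_tb_fill_sketch(struct iwl_tfd_tb *tb,
						 dma_addr_t addr, u16 len)
{
	u16 hi_n_len = len << 4;	/* length lives in bits 4-15 */

	tb->lo = cpu_to_le32((u32)addr);
	hi_n_len |= iwl_legacy_get_dma_hi_addr(addr);	/* bits 0-3 */
	tb->hi_n_len = cpu_to_le16(hi_n_len);
}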
474
475/**
476 * struct iwl_tfd
477 *
478 * Transmit Frame Descriptor (TFD)
479 *
480 * @ __reserved1[3] reserved
481 * @ num_tbs 0-4 number of active tbs
482 * 5 reserved
483 * 6-7 padding (not used)
484 * @ tbs[20] transmit frame buffer descriptors
485 * @ __pad padding
486 *
487 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
488 * Both driver and device share these circular buffers, each of which must be
489 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
490 *
491 * Driver must indicate the physical address of the base of each
492 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
493 *
494 * Each TFD contains pointer/size information for up to 20 data buffers
495 * in host DRAM. These buffers collectively contain the (one) frame described
496 * by the TFD. Each buffer must be a single contiguous block of memory within
497 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
498 * of (4K - 4). The 4965 concatenates all of a TFD's buffers into a single
499 * Tx frame, up to 8 KBytes in size.
500 *
501 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
502 */
503struct iwl_tfd {
504 u8 __reserved1[3];
505 u8 num_tbs;
506 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
507 __le32 __pad;
508} __packed;
509
510/* Keep Warm Size */
511#define IWL_KW_SIZE 0x1000 /* 4k */
512
513#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
new file mode 100644
index 000000000000..9d721cbda5bb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -0,0 +1,271 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <net/mac80211.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-core.h"
38
39
40const char *iwl_legacy_get_cmd_string(u8 cmd)
41{
42 switch (cmd) {
43 IWL_CMD(REPLY_ALIVE);
44 IWL_CMD(REPLY_ERROR);
45 IWL_CMD(REPLY_RXON);
46 IWL_CMD(REPLY_RXON_ASSOC);
47 IWL_CMD(REPLY_QOS_PARAM);
48 IWL_CMD(REPLY_RXON_TIMING);
49 IWL_CMD(REPLY_ADD_STA);
50 IWL_CMD(REPLY_REMOVE_STA);
51 IWL_CMD(REPLY_WEPKEY);
52 IWL_CMD(REPLY_3945_RX);
53 IWL_CMD(REPLY_TX);
54 IWL_CMD(REPLY_RATE_SCALE);
55 IWL_CMD(REPLY_LEDS_CMD);
56 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
57 IWL_CMD(REPLY_CHANNEL_SWITCH);
58 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
59 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
60 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
61 IWL_CMD(POWER_TABLE_CMD);
62 IWL_CMD(PM_SLEEP_NOTIFICATION);
63 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
64 IWL_CMD(REPLY_SCAN_CMD);
65 IWL_CMD(REPLY_SCAN_ABORT_CMD);
66 IWL_CMD(SCAN_START_NOTIFICATION);
67 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
68 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
69 IWL_CMD(BEACON_NOTIFICATION);
70 IWL_CMD(REPLY_TX_BEACON);
71 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
72 IWL_CMD(REPLY_BT_CONFIG);
73 IWL_CMD(REPLY_STATISTICS_CMD);
74 IWL_CMD(STATISTICS_NOTIFICATION);
75 IWL_CMD(CARD_STATE_NOTIFICATION);
76 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
77 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
78 IWL_CMD(SENSITIVITY_CMD);
79 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
80 IWL_CMD(REPLY_RX_PHY_CMD);
81 IWL_CMD(REPLY_RX_MPDU_CMD);
82 IWL_CMD(REPLY_RX);
83 IWL_CMD(REPLY_COMPRESSED_BA);
84 default:
85 return "UNKNOWN";
86
87 }
88}
89EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
90
91#define HOST_COMPLETE_TIMEOUT (HZ / 2)
92
93static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
94 struct iwl_device_cmd *cmd,
95 struct iwl_rx_packet *pkt)
96{
97 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
98 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
99 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
100 return;
101 }
102
103#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
104 switch (cmd->hdr.cmd) {
105 case REPLY_TX_LINK_QUALITY_CMD:
106 case SENSITIVITY_CMD:
107 IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
108 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
109 break;
110 default:
111 IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
112 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
113 }
114#endif
115}
116
117static int
118iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
119{
120 int ret;
121
122 BUG_ON(!(cmd->flags & CMD_ASYNC));
123
124 /* An asynchronous command can not expect an SKB to be set. */
125 BUG_ON(cmd->flags & CMD_WANT_SKB);
126
127 /* Assign a generic callback if one is not provided */
128 if (!cmd->callback)
129 cmd->callback = iwl_legacy_generic_cmd_callback;
130
131 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
132 return -EBUSY;
133
134 ret = iwl_legacy_enqueue_hcmd(priv, cmd);
135 if (ret < 0) {
136 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
137 iwl_legacy_get_cmd_string(cmd->id), ret);
138 return ret;
139 }
140 return 0;
141}
142
143int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
144{
145 int cmd_idx;
146 int ret;
147
148 BUG_ON(cmd->flags & CMD_ASYNC);
149
150 /* A synchronous command can not have a callback set. */
151 BUG_ON(cmd->callback);
152
153 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
154 iwl_legacy_get_cmd_string(cmd->id));
155 mutex_lock(&priv->sync_cmd_mutex);
156
157 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
158 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
159 iwl_legacy_get_cmd_string(cmd->id));
160
161 cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
162 if (cmd_idx < 0) {
163 ret = cmd_idx;
164 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
165 iwl_legacy_get_cmd_string(cmd->id), ret);
166 goto out;
167 }
168
169 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
170 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
171 HOST_COMPLETE_TIMEOUT);
172 if (!ret) {
173 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
174 IWL_ERR(priv,
175 "Error sending %s: time out after %dms.\n",
176 iwl_legacy_get_cmd_string(cmd->id),
177 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
178
179 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
180 IWL_DEBUG_INFO(priv,
181 "Clearing HCMD_ACTIVE for command %s\n",
182 iwl_legacy_get_cmd_string(cmd->id));
183 ret = -ETIMEDOUT;
184 goto cancel;
185 }
186 }
187
188 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
189 IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
190 iwl_legacy_get_cmd_string(cmd->id));
191 ret = -ECANCELED;
192 goto fail;
193 }
194 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
195 IWL_ERR(priv, "Command %s failed: FW Error\n",
196 iwl_legacy_get_cmd_string(cmd->id));
197 ret = -EIO;
198 goto fail;
199 }
200 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
201 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
202 iwl_legacy_get_cmd_string(cmd->id));
203 ret = -EIO;
204 goto cancel;
205 }
206
207 ret = 0;
208 goto out;
209
210cancel:
211 if (cmd->flags & CMD_WANT_SKB) {
212 /*
213 * Cancel the CMD_WANT_SKB flag for the cmd in the
214			 * TX cmd queue. Otherwise, if the response comes
215			 * in later, it may write to an invalid
216			 * address (cmd->meta.source).
217 */
218 priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
219 ~CMD_WANT_SKB;
220 }
221fail:
222 if (cmd->reply_page) {
223 iwl_legacy_free_pages(priv, cmd->reply_page);
224 cmd->reply_page = 0;
225 }
226out:
227 mutex_unlock(&priv->sync_cmd_mutex);
228 return ret;
229}
230EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
231
232int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
233{
234 if (cmd->flags & CMD_ASYNC)
235 return iwl_legacy_send_cmd_async(priv, cmd);
236
237 return iwl_legacy_send_cmd_sync(priv, cmd);
238}
239EXPORT_SYMBOL(iwl_legacy_send_cmd);
240
241int
242iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
243{
244 struct iwl_host_cmd cmd = {
245 .id = id,
246 .len = len,
247 .data = data,
248 };
249
250 return iwl_legacy_send_cmd_sync(priv, &cmd);
251}
252EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
253
254int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
255 u8 id, u16 len, const void *data,
256 void (*callback)(struct iwl_priv *priv,
257 struct iwl_device_cmd *cmd,
258 struct iwl_rx_packet *pkt))
259{
260 struct iwl_host_cmd cmd = {
261 .id = id,
262 .len = len,
263 .data = data,
264 };
265
266 cmd.flags |= CMD_ASYNC;
267 cmd.callback = callback;
268
269 return iwl_legacy_send_cmd_async(priv, &cmd);
270}
271EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
new file mode 100644
index 000000000000..02132e755831
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h
@@ -0,0 +1,181 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_legacy_helpers_h__
31#define __iwl_legacy_helpers_h__
32
33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37
38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
39
40
41static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
42 struct ieee80211_hw *hw)
43{
44 return &hw->conf;
45}
46
47/**
48 * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
49 * @index -- current index
50 * @n_bd -- total number of entries in queue (must be power of 2)
51 */
52static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
53{
54 return ++index & (n_bd - 1);
55}
56
57/**
58 * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
59 * @index -- current index
60 * @n_bd -- total number of entries in queue (must be power of 2)
61 */
62static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
63{
64 return --index & (n_bd - 1);
65}
66
67/* TODO: Move fw_desc functions to iwl-pci.ko */
68static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
69 struct fw_desc *desc)
70{
71 if (desc->v_addr)
72 dma_free_coherent(&pci_dev->dev, desc->len,
73 desc->v_addr, desc->p_addr);
74 desc->v_addr = NULL;
75 desc->len = 0;
76}
77
78static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
79 struct fw_desc *desc)
80{
81 if (!desc->len) {
82 desc->v_addr = NULL;
83 return -EINVAL;
84 }
85
86 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
87 &desc->p_addr, GFP_KERNEL);
88 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
89}
90
91/*
92 * we have 8 bits used like this:
93 *
94 * 7 6 5 4 3 2 1 0
95 * | | | | | | | |
96 * | | | | | | +-+-------- AC queue (0-3)
97 * | | | | | |
98 * | +-+-+-+-+------------ HW queue ID
99 * |
100 * +---------------------- unused
101 */
102static inline void
103iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
104{
105 BUG_ON(ac > 3); /* only have 2 bits */
106 BUG_ON(hwq > 31); /* only use 5 bits */
107
108 txq->swq_id = (hwq << 2) | ac;
109}
110
111static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
112 struct iwl_tx_queue *txq)
113{
114 u8 queue = txq->swq_id;
115 u8 ac = queue & 3;
116 u8 hwq = (queue >> 2) & 0x1f;
117
118 if (test_and_clear_bit(hwq, priv->queue_stopped))
119 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
120 ieee80211_wake_queue(priv->hw, ac);
121}
122
123static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
124 struct iwl_tx_queue *txq)
125{
126 u8 queue = txq->swq_id;
127 u8 ac = queue & 3;
128 u8 hwq = (queue >> 2) & 0x1f;
129
130 if (!test_and_set_bit(hwq, priv->queue_stopped))
131 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
132 ieee80211_stop_queue(priv->hw, ac);
133}
134
135#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
136#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
137
138static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
139{
140 clear_bit(STATUS_INT_ENABLED, &priv->status);
141
142 /* disable interrupts from uCode/NIC to host */
143 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
144
145 /* acknowledge/clear/reset any interrupts still pending
146 * from uCode or flow handler (Rx/Tx DMA) */
147 iwl_write32(priv, CSR_INT, 0xffffffff);
148 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
149 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
150}
151
152static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
153{
154 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
155 set_bit(STATUS_INT_ENABLED, &priv->status);
156 iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
157}
158
159/**
160 * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
161 * @priv -- pointer to iwl_priv data structure
162 * @tsf_bits -- number of bits needed to shift for masking
163 */
164static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
165 u16 tsf_bits)
166{
167 return (1 << tsf_bits) - 1;
168}
169
170/**
171 * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
172 * @priv -- pointer to iwl_priv data structure
173 * @tsf_bits -- number of bits needed to shift for masking
174 */
175static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
176 u16 tsf_bits)
177{
178 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
179}
180
181#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
new file mode 100644
index 000000000000..5cc5d342914f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-io.h
@@ -0,0 +1,545 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_io_h__
30#define __iwl_legacy_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-devtrace.h"
37
38/*
39 * IO, register, and NIC memory access functions
40 *
41 * NOTE on naming convention and macro usage for these
42 *
43 * A single _ prefix before an access function means that no state
44 * check or debug information is printed when that function is called.
45 *
46 * A double __ prefix before an access function means that state is checked
47 * and the current line number and caller function name are printed in addition
48 * to any other debug output.
49 *
50 * The non-prefixed name is the #define that maps the caller into a
51 * #define that provides the caller's name and __LINE__ to the double
52 * prefix version.
53 *
54 * If you wish to call the function without any debug or state checking,
55 * you should use the single _ prefix version (as is used by dependent IO
56 * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
57 * _iwl_legacy_read32.)
58 *
59 * These declarations are *extremely* useful in quickly isolating code deltas
60 * which result in misconfiguration of the hardware I/O. In combination with
61 * git-bisect and the IO debug level you can quickly determine the specific
62 * commit which breaks the IO sequence to the hardware.
63 *
64 */
65
66static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
67{
68 trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
69 iowrite8(val, priv->hw_base + ofs);
70}
71
72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
73static inline void
74__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
75 u32 ofs, u8 val)
76{
77 IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
78 _iwl_legacy_write8(priv, ofs, val);
79}
80#define iwl_write8(priv, ofs, val) \
81 __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
82#else
83#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
84#endif
85
86
87static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
88{
89 trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
90 iowrite32(val, priv->hw_base + ofs);
91}
92
93#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
94static inline void
95__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
96 u32 ofs, u32 val)
97{
98 IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
99 _iwl_legacy_write32(priv, ofs, val);
100}
101#define iwl_write32(priv, ofs, val) \
102 __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
103#else
104#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
105#endif
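/*
 * Editorial sketch, not part of the original patch: callers use the
 * unprefixed macro; when CONFIG_IWLWIFI_LEGACY_DEBUG is set it expands to
 * the double-underscore variant (adding __FILE__/__LINE__ to the IO debug
 * log) before delegating to the raw single-underscore accessor.
 */
static inline void iwl_legacy_example_force_nmi(struct iwl_priv *priv)
{
	/* illustration only: an ordinary 32-bit register write */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
}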
106
107static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
108{
109 u32 val = ioread32(priv->hw_base + ofs);
110 trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
111 return val;
112}
113
114#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
115static inline u32
116__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
117{
118 IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
119 return _iwl_legacy_read32(priv, ofs);
120}
121#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
122#else
123#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
124#endif
125
126#define IWL_POLL_INTERVAL 10 /* microseconds */
127static inline int
128_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
129 u32 bits, u32 mask, int timeout)
130{
131 int t = 0;
132
133 do {
134 if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
135 return t;
136 udelay(IWL_POLL_INTERVAL);
137 t += IWL_POLL_INTERVAL;
138 } while (t < timeout);
139
140 return -ETIMEDOUT;
141}
142#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
143static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
144 struct iwl_priv *priv, u32 addr,
145 u32 bits, u32 mask, int timeout)
146{
147 int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
148 IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
149 addr, bits, mask,
150 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
151 return ret;
152}
153#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
154 __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
155 bits, mask, timeout)
156#else
157#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
158#endif
159
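/*
 * Editorial sketch, not part of the original patch: a typical caller polls
 * a status bit with a bounded busy-wait and treats -ETIMEDOUT as failure.
 * The 25000 us timeout here is an arbitrary example value.
 */
static inline int iwl_legacy_example_wait_clock_ready(struct iwl_priv *priv)
{
	int ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);

	return (ret < 0) ? ret : 0;
}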
160static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
161{
162 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
163}
164#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
165static inline void __iwl_legacy_set_bit(const char *f, u32 l,
166 struct iwl_priv *priv, u32 reg, u32 mask)
167{
168 u32 val = _iwl_legacy_read32(priv, reg) | mask;
169 IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
170 mask, val);
171 _iwl_legacy_write32(priv, reg, val);
172}
173static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
174{
175 unsigned long reg_flags;
176
177 spin_lock_irqsave(&p->reg_lock, reg_flags);
178 __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
179 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
180}
181#else
182static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
183{
184 unsigned long reg_flags;
185
186 spin_lock_irqsave(&p->reg_lock, reg_flags);
187 _iwl_legacy_set_bit(p, r, m);
188 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
189}
190#endif
191
192static inline void
193_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
194{
195 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
196}
197#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
198static inline void
199__iwl_legacy_clear_bit(const char *f, u32 l,
200 struct iwl_priv *priv, u32 reg, u32 mask)
201{
202 u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
203 IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
204 _iwl_legacy_write32(priv, reg, val);
205}
206static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
207{
208 unsigned long reg_flags;
209
210 spin_lock_irqsave(&p->reg_lock, reg_flags);
211 __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
212 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
213}
214#else
215static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
216{
217 unsigned long reg_flags;
218
219 spin_lock_irqsave(&p->reg_lock, reg_flags);
220 _iwl_legacy_clear_bit(p, r, m);
221 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
222}
223#endif
224
225static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
226{
227 int ret;
228 u32 val;
229
230 /* this bit wakes up the NIC */
231 _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
232 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
233
234 /*
235 * These bits say the device is running, and should keep running for
236 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
237 * but they do not indicate that embedded SRAM is restored yet;
238 * 3945 and 4965 have volatile SRAM, and must save/restore contents
239 * to/from host DRAM when sleeping/waking for power-saving.
240 * Each direction takes approximately 1/4 millisecond; with this
241 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQ if a
242 * series of register accesses is expected (e.g. reading the Event Log),
243 * to keep the device from sleeping.
244 *
245 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
246 * SRAM is okay/restored. We don't check that here because this call
247 * is just for hardware register access; but GP1 MAC_SLEEP check is a
248 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
249 *
250 */
251 ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
252 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
253 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
254 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
255 if (ret < 0) {
256 val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
257 IWL_ERR(priv,
258 "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
259 _iwl_legacy_write32(priv, CSR_RESET,
260 CSR_RESET_REG_FLAG_FORCE_NMI);
261 return -EIO;
262 }
263
264 return 0;
265}
266
267#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
268static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
269 struct iwl_priv *priv)
270{
271 IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
272 return _iwl_legacy_grab_nic_access(priv);
273}
274#define iwl_grab_nic_access(priv) \
275 __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
276#else
277#define iwl_grab_nic_access(priv) \
278 _iwl_legacy_grab_nic_access(priv)
279#endif
280
281static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
282{
283 _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
284 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
285}
286#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
287static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
288 struct iwl_priv *priv)
289{
290
291 IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
292 _iwl_legacy_release_nic_access(priv);
293}
294#define iwl_release_nic_access(priv) \
295 __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
296#else
297#define iwl_release_nic_access(priv) \
298 _iwl_legacy_release_nic_access(priv)
299#endif
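/*
 * Editorial sketch, not part of the original patch: as the comment in
 * _iwl_legacy_grab_nic_access() advises, a burst of register reads should
 * grab MAC access once, hold it for the whole series, and release it
 * afterwards, all under priv->reg_lock.
 */
static inline void iwl_legacy_example_read_burst(struct iwl_priv *priv,
						 u32 reg, u32 *buf, int n)
{
	unsigned long reg_flags;
	int i;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		for (i = 0; i < n; i++)
			buf[i] = _iwl_legacy_read32(priv,
						    reg + i * sizeof(u32));
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}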
300
301static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
302{
303 return _iwl_legacy_read32(priv, reg);
304}
305#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
306static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
307 struct iwl_priv *priv, u32 reg)
308{
309 u32 value = _iwl_legacy_read_direct32(priv, reg);
310 IWL_DEBUG_IO(priv,
311 "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
312 f, l);
313 return value;
314}
315static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
316{
317 u32 value;
318 unsigned long reg_flags;
319
320 spin_lock_irqsave(&priv->reg_lock, reg_flags);
321 iwl_grab_nic_access(priv);
322 value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
323 iwl_release_nic_access(priv);
324 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
325 return value;
326}
327
328#else
329static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
330{
331 u32 value;
332 unsigned long reg_flags;
333
334 spin_lock_irqsave(&priv->reg_lock, reg_flags);
335 iwl_grab_nic_access(priv);
336 value = _iwl_legacy_read_direct32(priv, reg);
337 iwl_release_nic_access(priv);
338 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
339 return value;
340
341}
342#endif
343
344static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
345 u32 reg, u32 value)
346{
347 _iwl_legacy_write32(priv, reg, value);
348}
349static inline void
350iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
351{
352 unsigned long reg_flags;
353
354 spin_lock_irqsave(&priv->reg_lock, reg_flags);
355 if (!iwl_grab_nic_access(priv)) {
356 _iwl_legacy_write_direct32(priv, reg, value);
357 iwl_release_nic_access(priv);
358 }
359 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
360}
361
362static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
363 u32 reg, u32 len, u32 *values)
364{
365 u32 count = sizeof(u32);
366
367 if ((priv != NULL) && (values != NULL)) {
368 for (; 0 < len; len -= count, reg += count, values++)
369 iwl_legacy_write_direct32(priv, reg, *values);
370 }
371}
372
373static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
374 u32 mask, int timeout)
375{
376 int t = 0;
377
378 do {
379 if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
380 return t;
381 udelay(IWL_POLL_INTERVAL);
382 t += IWL_POLL_INTERVAL;
383 } while (t < timeout);
384
385 return -ETIMEDOUT;
386}
387
388#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
389static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
390 struct iwl_priv *priv,
391 u32 addr, u32 mask, int timeout)
392{
393 int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
394
395 if (unlikely(ret == -ETIMEDOUT))
396 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
397 "timedout - %s %d\n", addr, mask, f, l);
398 else
399 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
400 "- %s %d\n", addr, mask, ret, f, l);
401 return ret;
402}
403#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
404__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
405#else
406#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
407#endif
408
409static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
410{
411 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
412 rmb();
413 return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
414}
415static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
416{
417 unsigned long reg_flags;
418 u32 val;
419
420 spin_lock_irqsave(&priv->reg_lock, reg_flags);
421 iwl_grab_nic_access(priv);
422 val = _iwl_legacy_read_prph(priv, reg);
423 iwl_release_nic_access(priv);
424 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
425 return val;
426}
427
428static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
429 u32 addr, u32 val)
430{
431 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
432 ((addr & 0x0000FFFF) | (3 << 24)));
433 wmb();
434 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
435}
436
437static inline void
438iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
439{
440 unsigned long reg_flags;
441
442 spin_lock_irqsave(&priv->reg_lock, reg_flags);
443 if (!iwl_grab_nic_access(priv)) {
444 _iwl_legacy_write_prph(priv, addr, val);
445 iwl_release_nic_access(priv);
446 }
447 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
448}
449
450#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
451_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
452
453static inline void
454iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
455{
456 unsigned long reg_flags;
457
458 spin_lock_irqsave(&priv->reg_lock, reg_flags);
459 iwl_grab_nic_access(priv);
460 _iwl_legacy_set_bits_prph(priv, reg, mask);
461 iwl_release_nic_access(priv);
462 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
463}
464
465#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
466_iwl_legacy_write_prph(priv, reg, \
467 ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
468
469static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
470 u32 bits, u32 mask)
471{
472 unsigned long reg_flags;
473
474 spin_lock_irqsave(&priv->reg_lock, reg_flags);
475 iwl_grab_nic_access(priv);
476 _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
477 iwl_release_nic_access(priv);
478 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
479}
480
481static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
482 *priv, u32 reg, u32 mask)
483{
484 unsigned long reg_flags;
485 u32 val;
486
487 spin_lock_irqsave(&priv->reg_lock, reg_flags);
488 iwl_grab_nic_access(priv);
489 val = _iwl_legacy_read_prph(priv, reg);
490 _iwl_legacy_write_prph(priv, reg, (val & ~mask));
491 iwl_release_nic_access(priv);
492 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
493}
494
495static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
496{
497 unsigned long reg_flags;
498 u32 value;
499
500 spin_lock_irqsave(&priv->reg_lock, reg_flags);
501 iwl_grab_nic_access(priv);
502
503 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
504 rmb();
505 value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
506
507 iwl_release_nic_access(priv);
508 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
509 return value;
510}
511
512static inline void
513iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
514{
515 unsigned long reg_flags;
516
517 spin_lock_irqsave(&priv->reg_lock, reg_flags);
518 if (!iwl_grab_nic_access(priv)) {
519 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
520 wmb();
521 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
522 iwl_release_nic_access(priv);
523 }
524 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
525}
526
527static inline void
528iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
529 u32 len, u32 *values)
530{
531 unsigned long reg_flags;
532
533 spin_lock_irqsave(&priv->reg_lock, reg_flags);
534 if (!iwl_grab_nic_access(priv)) {
535 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
536 wmb();
537 for (; 0 < len; len -= sizeof(u32), values++)
538 _iwl_legacy_write_direct32(priv,
539 HBUS_TARG_MEM_WDAT, *values);
540
541 iwl_release_nic_access(priv);
542 }
543 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
544}
545#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
new file mode 100644
index 000000000000..15eb8b707157
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.c
@@ -0,0 +1,188 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44
45/* default: 0 (IWL_LED_DEFAULT), follow the per-device default blink behaviour */
46static int led_mode;
47module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "0=system default, "
49 "1=On(RF On)/Off(RF Off), 2=blinking");
50
51static const struct ieee80211_tpt_blink iwl_blink[] = {
52 { .throughput = 0 * 1024 - 1, .blink_time = 334 },
53 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
54 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
55 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
56 { .throughput = 20 * 1024 - 1, .blink_time = 170 },
57 { .throughput = 50 * 1024 - 1, .blink_time = 150 },
58 { .throughput = 70 * 1024 - 1, .blink_time = 130 },
59 { .throughput = 100 * 1024 - 1, .blink_time = 110 },
60 { .throughput = 200 * 1024 - 1, .blink_time = 80 },
61 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
62};
63
64/*
65 * Adjust the LED blink rate to compensate for the MAC clock deviation of
66 * each HW. Blink rate analysis showed an average deviation of 0% on 3945
67 * and 5% on 4965 HW.
68 * The LED on/off time must be compensated per HW, according to that
69 * deviation, to achieve the desired LED frequency.
70 * The calculation is: (100 - averageDeviation) / 100 * blinkTime
71 * For code efficiency the calculation is done as:
72 * compensation = (100 - averageDeviation) * 64 / 100
73 * NewBlinkTime = (compensation * BlinkTime) / 64
74 */
75static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
76 u8 time, u16 compensation)
77{
78 if (!compensation) {
79 IWL_ERR(priv, "undefined blink compensation: "
80 "use pre-defined blinking time\n");
81 return time;
82 }
83
84 return (u8)((time * compensation) >> 6);
85}
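/*
 * Editorial worked example, not part of the original patch: with an assumed
 * average deviation of 5%, compensation = (100 - 5) * 64 / 100 = 60 in
 * integer math, so a nominal 128 ms on-time is programmed as
 * (128 * 60) >> 6 = 120 ms, compensating for the assumed clock deviation.
 */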
86
87/* Set led pattern command */
88static int iwl_legacy_led_cmd(struct iwl_priv *priv,
89 unsigned long on,
90 unsigned long off)
91{
92 struct iwl_led_cmd led_cmd = {
93 .id = IWL_LED_LINK,
94 .interval = IWL_DEF_LED_INTRVL
95 };
96 int ret;
97
98 if (!test_bit(STATUS_READY, &priv->status))
99 return -EBUSY;
100
101 if (priv->blink_on == on && priv->blink_off == off)
102 return 0;
103
104 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
105 priv->cfg->base_params->led_compensation);
106 led_cmd.on = iwl_legacy_blink_compensation(priv, on,
107 priv->cfg->base_params->led_compensation);
108 led_cmd.off = iwl_legacy_blink_compensation(priv, off,
109 priv->cfg->base_params->led_compensation);
110
111 ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
112 if (!ret) {
113 priv->blink_on = on;
114 priv->blink_off = off;
115 }
116 return ret;
117}
118
119static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
120 enum led_brightness brightness)
121{
122 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
123 unsigned long on = 0;
124
125 if (brightness > 0)
126 on = IWL_LED_SOLID;
127
128 iwl_legacy_led_cmd(priv, on, 0);
129}
130
131static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
132 unsigned long *delay_on,
133 unsigned long *delay_off)
134{
135 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
136
137 return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
138}
139
140void iwl_legacy_leds_init(struct iwl_priv *priv)
141{
142 int mode = led_mode;
143 int ret;
144
145 if (mode == IWL_LED_DEFAULT)
146 mode = priv->cfg->led_mode;
147
148 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
149 wiphy_name(priv->hw->wiphy));
150 priv->led.brightness_set = iwl_legacy_led_brightness_set;
151 priv->led.blink_set = iwl_legacy_led_blink_set;
152 priv->led.max_brightness = 1;
153
154 switch (mode) {
155 case IWL_LED_DEFAULT:
156 WARN_ON(1);
157 break;
158 case IWL_LED_BLINK:
159 priv->led.default_trigger =
160 ieee80211_create_tpt_led_trigger(priv->hw,
161 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
162 iwl_blink, ARRAY_SIZE(iwl_blink));
163 break;
164 case IWL_LED_RF_STATE:
165 priv->led.default_trigger =
166 ieee80211_get_radio_led_name(priv->hw);
167 break;
168 }
169
170 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
171 if (ret) {
172 kfree(priv->led.name);
173 return;
174 }
175
176 priv->led_registered = true;
177}
178EXPORT_SYMBOL(iwl_legacy_leds_init);
179
180void iwl_legacy_leds_exit(struct iwl_priv *priv)
181{
182 if (!priv->led_registered)
183 return;
184
185 led_classdev_unregister(&priv->led);
186 kfree(priv->led.name);
187}
188EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
new file mode 100644
index 000000000000..f0791f70f79d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.h
@@ -0,0 +1,56 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_leds_h__
28#define __iwl_legacy_leds_h__
29
30
31struct iwl_priv;
32
33#define IWL_LED_SOLID 11
34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
35
36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1)
38
39/*
40 * LED mode
41 * IWL_LED_DEFAULT: use device default
42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
43 * LED ON = RF ON
44 * LED OFF = RF OFF
45 * IWL_LED_BLINK: adjust led blink rate based on blink table
46 */
47enum iwl_led_mode {
48 IWL_LED_DEFAULT,
49 IWL_LED_RF_STATE,
50 IWL_LED_BLINK,
51};
52
53void iwl_legacy_leds_init(struct iwl_priv *priv);
54void iwl_legacy_leds_exit(struct iwl_priv *priv);
55
56#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
new file mode 100644
index 000000000000..38647e481eb0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
@@ -0,0 +1,456 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_rs_h__
28#define __iwl_legacy_rs_h__
29
30struct iwl_rate_info {
31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
32 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
33 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
34 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
35 u8 prev_ieee; /* previous rate in IEEE speeds */
36 u8 next_ieee; /* next rate in IEEE speeds */
37 u8 prev_rs; /* previous rate used in rs algo */
38 u8 next_rs; /* next rate used in rs algo */
39 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
40 u8 next_rs_tgg; /* next rate used in TGG rs algo */
41};
42
43struct iwl3945_rate_info {
44 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
45 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
46 u8 prev_ieee; /* previous rate in IEEE speeds */
47 u8 next_ieee; /* next rate in IEEE speeds */
48 u8 prev_rs; /* previous rate used in rs algo */
49 u8 next_rs; /* next rate used in rs algo */
50 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
51 u8 next_rs_tgg; /* next rate used in TGG rs algo */
52 u8 table_rs_index; /* index in rate scale table cmd */
53 u8 prev_table_rs; /* prev in rate table cmd */
54};
55
56
57/*
58 * These serve as indexes into
59 * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
60 */
61enum {
62 IWL_RATE_1M_INDEX = 0,
63 IWL_RATE_2M_INDEX,
64 IWL_RATE_5M_INDEX,
65 IWL_RATE_11M_INDEX,
66 IWL_RATE_6M_INDEX,
67 IWL_RATE_9M_INDEX,
68 IWL_RATE_12M_INDEX,
69 IWL_RATE_18M_INDEX,
70 IWL_RATE_24M_INDEX,
71 IWL_RATE_36M_INDEX,
72 IWL_RATE_48M_INDEX,
73 IWL_RATE_54M_INDEX,
74 IWL_RATE_60M_INDEX,
75 IWL_RATE_COUNT,
76 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
77 IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
78 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
79 IWL_RATE_INVALID = IWL_RATE_COUNT,
80};
81
82enum {
83 IWL_RATE_6M_INDEX_TABLE = 0,
84 IWL_RATE_9M_INDEX_TABLE,
85 IWL_RATE_12M_INDEX_TABLE,
86 IWL_RATE_18M_INDEX_TABLE,
87 IWL_RATE_24M_INDEX_TABLE,
88 IWL_RATE_36M_INDEX_TABLE,
89 IWL_RATE_48M_INDEX_TABLE,
90 IWL_RATE_54M_INDEX_TABLE,
91 IWL_RATE_1M_INDEX_TABLE,
92 IWL_RATE_2M_INDEX_TABLE,
93 IWL_RATE_5M_INDEX_TABLE,
94 IWL_RATE_11M_INDEX_TABLE,
95 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
96};
97
98enum {
99 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
100 IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
101 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
102 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
103 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
104};
105
106/* #define vs. enum to keep from defaulting to 'large integer' */
107#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
108#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
109#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
110#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
111#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
112#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
113#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
114#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
115#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
116#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
117#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
118#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
119#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
120
121/* uCode API values for legacy bit rates, both OFDM and CCK */
122enum {
123 IWL_RATE_6M_PLCP = 13,
124 IWL_RATE_9M_PLCP = 15,
125 IWL_RATE_12M_PLCP = 5,
126 IWL_RATE_18M_PLCP = 7,
127 IWL_RATE_24M_PLCP = 9,
128 IWL_RATE_36M_PLCP = 11,
129 IWL_RATE_48M_PLCP = 1,
130 IWL_RATE_54M_PLCP = 3,
131 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
132 IWL_RATE_1M_PLCP = 10,
133 IWL_RATE_2M_PLCP = 20,
134 IWL_RATE_5M_PLCP = 55,
135 IWL_RATE_11M_PLCP = 110,
136 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
137};
138
139/* uCode API values for OFDM high-throughput (HT) bit rates */
140enum {
141 IWL_RATE_SISO_6M_PLCP = 0,
142 IWL_RATE_SISO_12M_PLCP = 1,
143 IWL_RATE_SISO_18M_PLCP = 2,
144 IWL_RATE_SISO_24M_PLCP = 3,
145 IWL_RATE_SISO_36M_PLCP = 4,
146 IWL_RATE_SISO_48M_PLCP = 5,
147 IWL_RATE_SISO_54M_PLCP = 6,
148 IWL_RATE_SISO_60M_PLCP = 7,
149 IWL_RATE_MIMO2_6M_PLCP = 0x8,
150 IWL_RATE_MIMO2_12M_PLCP = 0x9,
151 IWL_RATE_MIMO2_18M_PLCP = 0xa,
152 IWL_RATE_MIMO2_24M_PLCP = 0xb,
153 IWL_RATE_MIMO2_36M_PLCP = 0xc,
154 IWL_RATE_MIMO2_48M_PLCP = 0xd,
155 IWL_RATE_MIMO2_54M_PLCP = 0xe,
156 IWL_RATE_MIMO2_60M_PLCP = 0xf,
157 IWL_RATE_SISO_INVM_PLCP,
158 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
159};
160
161/* MAC header values for bit rates */
162enum {
163 IWL_RATE_6M_IEEE = 12,
164 IWL_RATE_9M_IEEE = 18,
165 IWL_RATE_12M_IEEE = 24,
166 IWL_RATE_18M_IEEE = 36,
167 IWL_RATE_24M_IEEE = 48,
168 IWL_RATE_36M_IEEE = 72,
169 IWL_RATE_48M_IEEE = 96,
170 IWL_RATE_54M_IEEE = 108,
171 IWL_RATE_60M_IEEE = 120,
172 IWL_RATE_1M_IEEE = 2,
173 IWL_RATE_2M_IEEE = 4,
174 IWL_RATE_5M_IEEE = 11,
175 IWL_RATE_11M_IEEE = 22,
176};
177
178#define IWL_CCK_BASIC_RATES_MASK \
179 (IWL_RATE_1M_MASK | \
180 IWL_RATE_2M_MASK)
181
182#define IWL_CCK_RATES_MASK \
183 (IWL_CCK_BASIC_RATES_MASK | \
184 IWL_RATE_5M_MASK | \
185 IWL_RATE_11M_MASK)
186
187#define IWL_OFDM_BASIC_RATES_MASK \
188 (IWL_RATE_6M_MASK | \
189 IWL_RATE_12M_MASK | \
190 IWL_RATE_24M_MASK)
191
192#define IWL_OFDM_RATES_MASK \
193 (IWL_OFDM_BASIC_RATES_MASK | \
194 IWL_RATE_9M_MASK | \
195 IWL_RATE_18M_MASK | \
196 IWL_RATE_36M_MASK | \
197 IWL_RATE_48M_MASK | \
198 IWL_RATE_54M_MASK)
199
200#define IWL_BASIC_RATES_MASK \
201 (IWL_OFDM_BASIC_RATES_MASK | \
202 IWL_CCK_BASIC_RATES_MASK)
203
204#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
205#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
206
207#define IWL_INVALID_VALUE -1
208
209#define IWL_MIN_RSSI_VAL -100
210#define IWL_MAX_RSSI_VAL 0
211
212/* These values specify how many Tx frame attempts before
213 * searching for a new modulation mode */
214#define IWL_LEGACY_FAILURE_LIMIT 160
215#define IWL_LEGACY_SUCCESS_LIMIT 480
216#define IWL_LEGACY_TABLE_COUNT 160
217
218#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
219#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
220#define IWL_NONE_LEGACY_TABLE_COUNT 1500
221
222/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
223#define IWL_RS_GOOD_RATIO 12800 /* 100% */
224#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
225#define IWL_RATE_HIGH_TH 10880 /* 85% */
226#define IWL_RATE_INCREASE_TH 6400 /* 50% */
227#define IWL_RATE_DECREASE_TH 1920 /* 15% */
228
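/*
 * Editorial sketch, not part of the original patch: the thresholds above use
 * the same "per-cent * 128" scale documented for success_ratio in struct
 * iwl_rate_scale_data below. For example, 90 ACKed frames out of 100
 * attempts gives 128 * 100 * 90 / 100 = 11520, which clears
 * IWL_RATE_HIGH_TH (10880, i.e. 85%).
 */
static inline s32 iwl_legacy_example_success_ratio(s32 acked, s32 attempted)
{
	return attempted ? (128 * 100 * acked) / attempted : IWL_INVALID_VALUE;
}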
229/* possible actions when in legacy mode */
230#define IWL_LEGACY_SWITCH_ANTENNA1 0
231#define IWL_LEGACY_SWITCH_ANTENNA2 1
232#define IWL_LEGACY_SWITCH_SISO 2
233#define IWL_LEGACY_SWITCH_MIMO2_AB 3
234#define IWL_LEGACY_SWITCH_MIMO2_AC 4
235#define IWL_LEGACY_SWITCH_MIMO2_BC 5
236
237/* possible actions when in siso mode */
238#define IWL_SISO_SWITCH_ANTENNA1 0
239#define IWL_SISO_SWITCH_ANTENNA2 1
240#define IWL_SISO_SWITCH_MIMO2_AB 2
241#define IWL_SISO_SWITCH_MIMO2_AC 3
242#define IWL_SISO_SWITCH_MIMO2_BC 4
243#define IWL_SISO_SWITCH_GI 5
244
245/* possible actions when in mimo mode */
246#define IWL_MIMO2_SWITCH_ANTENNA1 0
247#define IWL_MIMO2_SWITCH_ANTENNA2 1
248#define IWL_MIMO2_SWITCH_SISO_A 2
249#define IWL_MIMO2_SWITCH_SISO_B 3
250#define IWL_MIMO2_SWITCH_SISO_C 4
251#define IWL_MIMO2_SWITCH_GI 5
252
253#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
254
255#define IWL_ACTION_LIMIT 3 /* # possible actions */
256
257#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
258
259/* load per tid defines for A-MPDU activation */
260#define IWL_AGG_TPT_THREHOLD 0
261#define IWL_AGG_LOAD_THRESHOLD 10
262#define IWL_AGG_ALL_TID 0xff
263#define TID_QUEUE_CELL_SPACING 50 /*mS */
264#define TID_QUEUE_MAX_SIZE 20
265#define TID_ROUND_VALUE 5 /* mS */
266#define TID_MAX_LOAD_COUNT 8
267
268#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
269#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
270
271extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
272
273enum iwl_table_type {
274 LQ_NONE,
275 LQ_G, /* legacy types */
276 LQ_A,
277 LQ_SISO, /* high-throughput types */
278 LQ_MIMO2,
279 LQ_MAX,
280};
281
282#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
283#define is_siso(tbl) ((tbl) == LQ_SISO)
284#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
285#define is_mimo(tbl) (is_mimo2(tbl))
286#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
287#define is_a_band(tbl) ((tbl) == LQ_A)
288#define is_g_and(tbl) ((tbl) == LQ_G)
289
290#define ANT_NONE 0x0
291#define ANT_A BIT(0)
292#define ANT_B BIT(1)
293#define ANT_AB (ANT_A | ANT_B)
294#define ANT_C BIT(2)
295#define ANT_AC (ANT_A | ANT_C)
296#define ANT_BC (ANT_B | ANT_C)
297#define ANT_ABC (ANT_AB | ANT_C)
298
299#define IWL_MAX_MCS_DISPLAY_SIZE 12
300
301struct iwl_rate_mcs_info {
302 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
303 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
304};
305
306/**
307 * struct iwl_rate_scale_data -- tx success history for one rate
308 */
309struct iwl_rate_scale_data {
310 u64 data; /* bitmap of successful frames */
311 s32 success_counter; /* number of frames successful */
312 s32 success_ratio; /* per-cent * 128 */
313 s32 counter; /* number of frames attempted */
314 s32 average_tpt; /* success ratio * expected throughput */
315 unsigned long stamp;
316};
317
318/**
319 * struct iwl_scale_tbl_info -- tx params and success history for all rates
320 *
321 * There are two of these in struct iwl_lq_sta,
322 * one for "active", and one for "search".
323 */
324struct iwl_scale_tbl_info {
325 enum iwl_table_type lq_type;
326 u8 ant_type;
327 u8 is_SGI; /* 1 = short guard interval */
328 u8 is_ht40; /* 1 = 40 MHz channel width */
329 u8 is_dup; /* 1 = duplicated data streams */
330 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
331 u8 max_search; /* maximum number of tables we can search */
332 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
333 u32 current_rate; /* rate_n_flags, uCode API format */
334 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
335};
336
337struct iwl_traffic_load {
338 unsigned long time_stamp; /* age of the oldest statistics */
339 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
340 * slice */
341 u32 total; /* total num of packets during the
342 * last TID_MAX_TIME_DIFF */
343 u8 queue_count; /* number of queues that have
344 * been used since the last cleanup */
345 u8 head; /* start of the circular buffer */
346};
347
348/**
349 * struct iwl_lq_sta -- driver's rate scaling private structure
350 *
351 * Pointer to this gets passed back and forth between driver and mac80211.
352 */
353struct iwl_lq_sta {
354 u8 active_tbl; /* index of active table, range 0-1 */
355 u8 enable_counter; /* indicates HT mode */
356 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
357 u8 search_better_tbl; /* 1: currently trying alternate mode */
358 s32 last_tpt;
359
360 /* The following determine when to search for a new mode */
361 u32 table_count_limit;
362 u32 max_failure_limit; /* # failed frames before new search */
363 u32 max_success_limit; /* # successful frames before new search */
364 u32 table_count;
365 u32 total_failed; /* total failed frames, any/all rates */
366 u32 total_success; /* total successful frames, any/all rates */
367 u64 flush_timer; /* time staying in mode before new search */
368
369 u8 action_counter; /* # mode-switch actions tried */
370 u8 is_green;
371 u8 is_dup;
372 enum ieee80211_band band;
373
374 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
375 u32 supp_rates;
376 u16 active_legacy_rate;
377 u16 active_siso_rate;
378 u16 active_mimo2_rate;
379 s8 max_rate_idx; /* Max rate set by user */
380 u8 missed_rate_counter;
381
382 struct iwl_link_quality_cmd lq;
383 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
384 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
385 u8 tx_agg_tid_en;
386#ifdef CONFIG_MAC80211_DEBUGFS
387 struct dentry *rs_sta_dbgfs_scale_table_file;
388 struct dentry *rs_sta_dbgfs_stats_table_file;
389 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
390 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
391 u32 dbg_fixed_rate;
392#endif
393 struct iwl_priv *drv;
394
395 /* used to be in sta_info */
396 int last_txrate_idx;
397 /* last tx rate_n_flags */
398 u32 last_rate_n_flags;
399 /* packets destined for this STA are aggregated */
400 u8 is_agg;
401};
402
403static inline u8 iwl4965_num_of_ant(u8 mask)
404{
405 return !!((mask) & ANT_A) +
406 !!((mask) & ANT_B) +
407 !!((mask) & ANT_C);
408}
409
410static inline u8 iwl4965_first_antenna(u8 mask)
411{
412 if (mask & ANT_A)
413 return ANT_A;
414 if (mask & ANT_B)
415 return ANT_B;
416 return ANT_C;
417}
418
419
420/**
421 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
422 *
423 * The specific throughput table used is based on the type of network
424 * the station is associated with, including A, B, G, and G w/ TGG protection
425 */
426extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
427
428/* Initialize station's rate scaling information after adding station */
429extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
430 struct ieee80211_sta *sta, u8 sta_id);
431extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
432 struct ieee80211_sta *sta, u8 sta_id);
433
434/**
435 * iwl_rate_control_register - Register the rate control algorithm callbacks
436 *
437 * Since the rate control algorithm is hardware specific, there is no need
438 * or reason to place it as a stand-alone module. The driver can call
439 * iwl_rate_control_register in order to register the rate control callbacks
440 * with the mac80211 subsystem. This should be performed prior to calling
441 * ieee80211_register_hw.
442 *
443 */
444extern int iwl4965_rate_control_register(void);
445extern int iwl3945_rate_control_register(void);
446
447/**
448 * iwl_rate_control_unregister - Unregister the rate control callbacks
449 *
450 * This should be called after calling ieee80211_unregister_hw, but before
451 * the driver is unloaded.
452 */
453extern void iwl4965_rate_control_unregister(void);
454extern void iwl3945_rate_control_unregister(void);
455
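/*
 * Editorial sketch, not part of the original patch: the intended ordering,
 * as described above, in a hypothetical module init/exit pair:
 *
 *	static int __init iwl4965_example_init(void)
 *	{
 *		int ret = iwl4965_rate_control_register();
 *		if (ret)
 *			return ret;
 *		return pci_register_driver(&iwl4965_example_driver);
 *	}
 *
 *	static void __exit iwl4965_example_exit(void)
 *	{
 *		pci_unregister_driver(&iwl4965_example_driver);
 *		iwl4965_rate_control_unregister();
 *	}
 *
 * iwl4965_example_driver is a hypothetical pci_driver whose probe path
 * eventually reaches ieee80211_register_hw().
 */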
456#endif /* __iwl_legacy_rs__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
new file mode 100644
index 000000000000..903ef0d6d6cb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.c
@@ -0,0 +1,165 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-power.h"
44
45/*
46 * Setting power level allows the card to go to sleep when not busy.
47 *
48 * We calculate a sleep command based on the required latency, which
49 * we get from mac80211. In order to handle thermal throttling, we can
50 * also use pre-defined power levels.
51 */
52
53/*
54 * This defines the old power levels. They are still used by default
55 * (level 1) and for thermal throttling (levels 3 through 5).
56 */
57
58struct iwl_power_vec_entry {
59 struct iwl_powertable_cmd cmd;
60 u8 no_dtim; /* number of skip dtim */
61};
62
63static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
64 struct iwl_powertable_cmd *cmd)
65{
66 memset(cmd, 0, sizeof(*cmd));
67
68 if (priv->power_data.pci_pm)
69 cmd->flags |= IWL_POWER_PCI_PM_MSK;
70
71 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
72}
73
74static int
75iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
76{
77 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
78 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
79 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
80 le32_to_cpu(cmd->tx_data_timeout));
81 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
82 le32_to_cpu(cmd->rx_data_timeout));
83 IWL_DEBUG_POWER(priv,
84 "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
85 le32_to_cpu(cmd->sleep_interval[0]),
86 le32_to_cpu(cmd->sleep_interval[1]),
87 le32_to_cpu(cmd->sleep_interval[2]),
88 le32_to_cpu(cmd->sleep_interval[3]),
89 le32_to_cpu(cmd->sleep_interval[4]));
90
91 return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
92 sizeof(struct iwl_powertable_cmd), cmd);
93}
94
95int
96iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
97 bool force)
98{
99 int ret;
100 bool update_chains;
101
102 lockdep_assert_held(&priv->mutex);
103
104 /* Don't update the RX chain when chain noise calibration is running */
105 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
106 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
107
108 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
109 return 0;
110
111 if (!iwl_legacy_is_ready_rf(priv))
112 return -EIO;
113
114 /* on scan complete, sleep_cmd_next is applied, so keep it updated */
115 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
116 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
117 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
118 return 0;
119 }
120
121 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
122 set_bit(STATUS_POWER_PMI, &priv->status);
123
124 ret = iwl_legacy_set_power(priv, cmd);
125 if (!ret) {
126 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
127 clear_bit(STATUS_POWER_PMI, &priv->status);
128
129 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
130 priv->cfg->ops->lib->update_chain_flags(priv);
131 else if (priv->cfg->ops->lib->update_chain_flags)
132 IWL_DEBUG_POWER(priv,
133 "Cannot update the power, chain noise "
134 "calibration running: %d\n",
135 priv->chain_noise_data.state);
136
137 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
138 } else
139 IWL_ERR(priv, "set power fail, ret = %d", ret);
140
141 return ret;
142}
143
144int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
145{
146 struct iwl_powertable_cmd cmd;
147
148 iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
149 return iwl_legacy_power_set_mode(priv, &cmd, force);
150}
151EXPORT_SYMBOL(iwl_legacy_power_update_mode);
152
153/* initialize to default */
154void iwl_legacy_power_initialize(struct iwl_priv *priv)
155{
156 u16 lctl = iwl_legacy_pcie_link_ctl(priv);
157
158 priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
159
160 priv->power_data.debug_sleep_level_override = -1;
161
162 memset(&priv->power_data.sleep_cmd, 0,
163 sizeof(priv->power_data.sleep_cmd));
164}
165EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
new file mode 100644
index 000000000000..d30b36acdc4a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.h
@@ -0,0 +1,55 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_legacy_power_setting_h__
29#define __iwl_legacy_power_setting_h__
30
31#include "iwl-commands.h"
32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
45 int debug_sleep_level_override;
46 bool pci_pm;
47};
48
49int
50iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
51 bool force);
52int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
53void iwl_legacy_power_initialize(struct iwl_priv *priv);
54
55#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h
new file mode 100644
index 000000000000..30a493003ab0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-prph.h
@@ -0,0 +1,523 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_prph_h__
64#define __iwl_legacy_prph_h__
65
66/*
67 * Registers in this file are internal, not PCI bus memory mapped.
68 * Driver accesses these via HBUS_TARG_PRPH_* registers.
69 */
70#define PRPH_BASE (0x00000)
71#define PRPH_END (0xFFFFF)
72
73/* APMG (power management) constants */
74#define APMG_BASE (PRPH_BASE + 0x3000)
75#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000)
76#define APMG_CLK_EN_REG (APMG_BASE + 0x0004)
77#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008)
78#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c)
79#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010)
80#define APMG_RFKILL_REG (APMG_BASE + 0x0014)
81#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c)
82#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020)
83#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
84#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
85
86#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
87#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
88#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
89
90#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
98
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
100
101/**
102 * BSM (Bootstrap State Machine)
103 *
104 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
105 * in special SRAM that does not power down when the embedded control
106 * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
107 *
108 * When powering back up after sleeps (or during initial uCode load), the BSM
109 * internally loads the short bootstrap program from the special SRAM into the
110 * embedded processor's instruction SRAM, and starts the processor so it runs
111 * the bootstrap program.
112 *
113 * This bootstrap program loads (via PCI busmaster DMA) instructions and data
114 * images for a uCode program from host DRAM locations. The host driver
115 * indicates DRAM locations and sizes for instruction and data images via the
116 * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
117 * the new program starts automatically.
118 *
119 * The uCode used for open-source drivers includes two programs:
120 *
121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification
123 * (struct iwl_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver.
127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct iwl_alive_resp) that it
130 * is ready to be used.
131 *
132 * When initializing the NIC, the host driver does the following procedure:
133 *
134 * 1) Load bootstrap program (instructions only, no data image for bootstrap)
135 * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
136 *
137 * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
138 * images in host DRAM.
139 *
140 * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
141 * BSM_WR_MEM_SRC_REG = 0
142 * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
143 * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image
144 *
145 * 4) Load bootstrap into instruction SRAM:
146 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
147 *
148 * 5) Wait for load completion:
149 * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
150 *
151 * 6) Enable future boot loads whenever NIC's power management triggers it:
152 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
153 *
154 * 7) Start the NIC by removing all reset bits:
155 * CSR_RESET = 0
156 *
157 * The bootstrap uCode (already in instruction SRAM) loads initialization
158 * uCode. Initialization uCode performs data initialization, sends
159 * "initialize alive" notification to host, and waits for a signal from
160 * host to load runtime code.
161 *
162 * 8) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
163 * images in host DRAM. The last register loaded must be the instruction
164 * byte count register ("1" in MSbit tells initialization uCode to load
165 * the runtime uCode):
166 * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
167 *
168 * 9) Wait for "alive" notification, then issue normal runtime commands.
169 *
170 * Data caching during power-downs:
171 *
172 * Just before the embedded controller powers down (e.g for automatic
173 * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
174 * a current snapshot of the embedded processor's data SRAM into host DRAM.
175 * This caches the data while the embedded processor's memory is powered down.
176 * Location and size are controlled by BSM_DRAM_DATA_* registers.
177 *
178 * NOTE: Instruction SRAM does not need to be saved, since that doesn't
179 * change during operation; the original image (from uCode distribution
180 * file) can be used for reload.
181 *
182 * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
183 * at the BSM_DRAM_* registers, which now point to the runtime instruction
184 * image and the cached (modified) runtime data (*not* the initialization
185 * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
186 * uCode from where it left off before the power-down.
187 *
188 * NOTE: Initialization uCode does *not* run as part of the save/restore
189 * procedure.
190 *
191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory).
195 *
196 * Note that, during normal operation, the host DRAM that held the initial
197 * startup data for the runtime code is now being used as a backup data cache
198 * for modified data! If you need to completely re-initialize the NIC, make
199 * sure that you use the runtime data image from the uCode distribution file,
200 * not the modified/saved runtime data. You may want to store a separate
201 * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
202 */
203
204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208
209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800)
212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* dwords */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218
219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
221 * NOTE: 3945 pointers use bits 31:0 of DRAM address.
222 * 4965 pointers use bits 35:4 of DRAM address.
223 */
224#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
225#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
226#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
227#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
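An illustrative sketch (not taken from the driver) of the step above that points BSM_DRAM_* at the runtime uCode images in host DRAM. The iwl_legacy_write_prph() helper and the raw 32-bit address/length parameters are assumptions here; the real loaders also encode the DMA addresses per device (e.g. 4965 packs address bits 35:4) before writing these registers.

static void __maybe_unused
iwl_bsm_point_to_runtime_sketch(struct iwl_priv *priv,
				u32 inst_addr, u32 inst_len,
				u32 data_addr, u32 data_len)
{
	/* Tell the BSM where the runtime instruction/data images live */
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, inst_addr);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, data_addr);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Instruction byte count must be written last; BSM_DRAM_INST_LOAD
	 * in the MSbit tells the "initialize" uCode to load the runtime
	 * image. */
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
			      inst_len | BSM_DRAM_INST_LOAD);
}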
228
229/*
230 * BSM special memory, stays powered on during power-save sleeps.
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */
235
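An illustrative sketch (not taken from the driver) of steps 3)-5) of the bootstrap-load procedure documented above. The iwl_legacy_{read,write}_prph() accessors, the destination-address parameter and the polling budget are assumptions; NIC-access locking, required headers and error reporting are omitted.

static int __maybe_unused
iwl_bsm_load_bootstrap_sketch(struct iwl_priv *priv,
			      u32 inst_dst, u32 inst_dwords)
{
	int i;

	/* Step 3: source (start of BSM SRAM), destination, dword count */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, inst_dst);
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, inst_dwords);

	/* Step 4: kick off the copy into instruction SRAM */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Step 5: poll until the START bit clears, i.e. load complete */
	for (i = 0; i < 100; i++) {
		if (!(iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG) &
		      BSM_WR_CTRL_REG_BIT_START))
			return 0;
		udelay(10);	/* <linux/delay.h> */
	}
	return -ETIMEDOUT;
}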
236
237/* 3945 Tx scheduler registers */
238#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
239#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000)
240#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004)
241#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010)
242#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014)
243#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020)
244#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
245#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
246
247/**
248 * Tx Scheduler
249 *
250 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
251 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
252 * host DRAM. It steers each frame's Tx command (which contains the frame
253 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
254 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
255 * but one DMA channel may take input from several queues.
256 *
257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c):
259 *
260 * 0 -- EDCA BK (background) frames, lowest priority
261 * 1 -- EDCA BE (best effort) frames, normal priority
262 * 2 -- EDCA VI (video) frames, higher priority
263 * 3 -- EDCA VO (voice) and management frames, highest priority
264 * 4 -- Commands (e.g. RXON, etc.)
265 * 5 -- unused (HCCA)
266 * 6 -- unused (HCCA)
267 * 7 -- not used by driver (device-internal only)
268 *
269 *
270 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
271 * In addition, driver can map the remaining queues to Tx DMA/FIFO
272 * channels 0-3 to support 11n aggregation via EDCA DMA channels.
273 *
274 * The driver sets up each queue to work in one of two modes:
275 *
276 * 1) Scheduler-Ack, in which the scheduler automatically supports a
277 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
278 * contains TFDs for a unique combination of Recipient Address (RA)
279 * and Traffic Identifier (TID), that is, traffic of a given
280 * Quality-Of-Service (QOS) priority, destined for a single station.
281 *
282 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
283 * each frame within the BA window, including whether it's been transmitted,
284 * and whether it's been acknowledged by the receiving station. The device
285 * automatically processes block-acks received from the receiving STA,
286 * and reschedules un-acked frames to be retransmitted (successful
287 * Tx completion may end up being out-of-order).
288 *
289 * The driver must maintain the queue's Byte Count table in host DRAM
290 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
291 * This mode does not support fragmentation.
292 *
293 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
294 * The device may automatically retry Tx, but will retry only one frame
295 * at a time, until receiving ACK from receiving station, or reaching
296 * retry limit and giving up.
297 *
298 * The command queue (#4/#9) must use this mode!
299 * This mode does not require use of the Byte Count table in host DRAM.
300 *
301 * Driver controls scheduler operation via 3 means:
302 * 1) Scheduler registers
303 * 2) Shared scheduler database in internal 4965 SRAM
304 * 3) Shared data in host DRAM
305 *
306 * Initialization:
307 *
308 * When loading, driver should allocate memory for:
309 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
310 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
311 * (1024 bytes for each queue).
312 *
313 * After receiving "Alive" response from uCode, driver must initialize
314 * the scheduler (especially for queue #4/#9, the command queue, otherwise
315 * the driver can't issue commands!):
316 */
317
318/**
319 * Max Tx window size is the max number of contiguous TFDs that the scheduler
320 * can keep track of at one time when creating block-ack chains of frames.
321 * Note that "64" matches the number of ack bits in a block-ack packet.
322 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
323 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
324 */
325#define SCD_WIN_SIZE 64
326#define SCD_FRAME_LIMIT 64
327
328/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
329#define IWL49_SCD_START_OFFSET 0xa02c00
330
331/*
332 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
333 * Value is valid only after "Alive" response from uCode.
334 */
335#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
336
337/*
338 * Driver may need to update queue-empty bits after changing queue's
339 * write and read pointers (indexes) during (re-)initialization (i.e. when
340 * scheduler is not tracking what's happening).
341 * Bit fields:
342 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
343 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
344 * NOTE: This register is not used by Linux driver.
345 */
346#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
347
348/*
349 * Physical base address of array of byte count (BC) circular buffers (CBs).
350 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
351 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
352 * Others are spaced by 1024 bytes.
353 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
354 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
355 * Bit fields:
356 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
357 */
358#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
359
360/*
361 * Enables any/all Tx DMA/FIFO channels.
362 * Scheduler generates requests for only the active channels.
363 * Set this to 0xff to enable all 8 channels (normal usage).
364 * Bit fields:
365 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
366 */
367#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
368/*
369 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
370 * Initialized and updated by driver as new TFDs are added to queue.
371 * NOTE: If using Block Ack, index must correspond to frame's
372 * Start Sequence Number; index = (SSN & 0xff)
373 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
374 */
375#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
376
377/*
378 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
379 * For FIFO mode, index indicates next frame to transmit.
380 * For Scheduler-ACK mode, index indicates first frame in Tx window.
381 * Initialized by driver, updated by scheduler.
382 */
383#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
384
385/*
386 * Select which queues work in chain mode (1) vs. not (0).
387 * Use chain mode to build chains of aggregated frames.
388 * Bit fields:
389 * 31-16: Reserved
390 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
391 * NOTE: If driver sets up a queue for chain mode, it should also set up
392 * Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
393 */
394#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
395
396/*
397 * Select which queues interrupt driver when scheduler increments
398 * a queue's read pointer (index).
399 * Bit fields:
400 * 31-16: Reserved
401 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
402 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
403 * from Rx queue to read Tx command responses and update Tx queues.
404 */
405#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
406
407/*
408 * Queue search status registers. One for each queue.
409 * Sets up queue mode and assigns queue to Tx DMA channel.
410 * Bit fields:
411 * 19-10: Write mask/enable bits for bits 0-9
412 * 9: Driver should init to "0"
413 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
414 * Driver should init to "1" for aggregation mode, or "0" otherwise.
415 * 7-6: Driver should init to "0"
416 * 5: Window Size Left; indicates whether scheduler can request
417 * another TFD, based on window size, etc. Driver should init
418 * this bit to "1" for aggregation mode, or "0" for non-agg.
419 * 4-1: Tx FIFO to use (range 0-7).
420 * 0: Queue is active (1), not active (0).
421 * Other bits should be written as "0"
422 *
423 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
424 * via SCD_QUEUECHAIN_SEL.
425 */
426#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
427 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
428
429/* Bit field positions */
430#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
431#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
432#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
433#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
434
435/* Write masks */
436#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
437#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
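An illustrative sketch (not taken from the driver) of composing a queue's status word from the positions and write mask above: 'active' turns the queue on, 'fifo' selects the Tx DMA channel, and 'scd_ack' selects Scheduler-ACK mode, with the Window-Size-Left bit set alongside it for aggregation. The iwl_legacy_write_prph() helper is assumed, since SCD registers must be reached through the periphery (HBUS_TARG_PRPH) interface.

static void __maybe_unused
iwl49_set_queue_status_sketch(struct iwl_priv *priv, int txq_id,
			      int fifo, bool active, bool scd_ack)
{
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		(active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(fifo << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		(scd_ack << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		(scd_ack << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		IWL49_SCD_QUEUE_STTS_REG_MSK);	/* write-enable the fields above */
}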
438
439/**
440 * 4965 internal SRAM structures for scheduler, shared with driver ...
441 *
442 * Driver should clear and initialize the following areas after receiving
443 * "Alive" response from 4965 uCode, i.e. after initial
444 * uCode load, or after a uCode load done for error recovery:
445 *
446 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
447 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
448 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
449 *
450 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
451 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
452 * All OFFSET values must be added to this base address.
453 */
454
455/*
456 * Queue context. One 8-byte entry for each of 16 queues.
457 *
458 * Driver should clear this entire area (size 0x80) to 0 after receiving
459 * "Alive" notification from uCode. Additionally, driver should init
460 * each queue's entry as follows:
461 *
462 * LS Dword bit fields:
463 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
464 *
465 * MS Dword bit fields:
466 * 16-22: Frame limit. Driver should init to 10 (0xa).
467 *
468 * Driver should init all other bits to 0.
469 *
470 * Init must be done after driver receives "Alive" response from 4965 uCode,
471 * and when setting up queue for aggregation.
472 */
473#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
474#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
475 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
476
477#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
478#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
479#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
480#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
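A minimal sketch (not taken from the driver) of initializing one queue's 8-byte context entry with SCD_WIN_SIZE and SCD_FRAME_LIMIT, per the layout described above. The iwl_legacy_write_targ_mem() SRAM accessor and the scd_base_addr field (the value read back from IWL49_SCD_SRAM_BASE_ADDR after "Alive") are assumptions here.

static void __maybe_unused
iwl49_init_queue_context_sketch(struct iwl_priv *priv, int txq_id)
{
	u32 ctx = priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);

	/* LS dword: max Tx window size for Scheduler-ACK */
	iwl_legacy_write_targ_mem(priv, ctx,
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	/* MS dword: frame limit */
	iwl_legacy_write_targ_mem(priv, ctx + sizeof(u32),
		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}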
481
482/*
483 * Tx Status Bitmap
484 *
485 * Driver should clear this entire area (size 0x100) to 0 after receiving
486 * "Alive" notification from uCode. Area is used only by device itself;
487 * no other support (besides clearing) is required from driver.
488 */
489#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
490
491/*
492 * RAxTID to queue translation mapping.
493 *
494 * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
495 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
496 * one QOS priority level destined for one station (for this wireless link,
497 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
498 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
499 * mode, the device ignores the mapping value.
500 *
501 * Bit fields, for each 16-bit map:
502 * 15-9: Reserved, set to 0
503 * 8-4: Index into device's station table for recipient station
504 * 3-0: Traffic ID (tid), range 0-15
505 *
506 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
507 * "Alive" notification from uCode. To update a 16-bit map value, driver
508 * must read a dword-aligned value from device SRAM, replace the 16-bit map
509 * value of interest, and write the dword value back into device SRAM.
510 */
511#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
512
513/* Find translation table dword to read/write for given queue */
514#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
515 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
516
517#define IWL_SCD_TXFIFO_POS_TID (0)
518#define IWL_SCD_TXFIFO_POS_RA (4)
519#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
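An illustrative sketch (not taken from the driver) of the dword read-modify-write described above for installing one queue's RAxTID mapping. The iwl_legacy_{read,write}_targ_mem() accessors and the scd_base_addr field are assumptions.

static void __maybe_unused
iwl49_set_ra_tid_map_sketch(struct iwl_priv *priv, int txq_id,
			    u8 sta_id, u8 tid)
{
	u32 addr = priv->scd_base_addr +
		   IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	u16 map = ((sta_id << IWL_SCD_TXFIFO_POS_RA) |
		   (tid << IWL_SCD_TXFIFO_POS_TID)) &
		  IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
	u32 val = iwl_legacy_read_targ_mem(priv, addr);

	/* Each dword holds two 16-bit entries; odd queues use the high half */
	if (txq_id & 0x1)
		val = (val & 0x0000FFFF) | ((u32)map << 16);
	else
		val = (val & 0xFFFF0000) | map;

	iwl_legacy_write_targ_mem(priv, addr, val);
}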
520
521/*********************** END TX SCHEDULER *************************************/
522
523#endif /* __iwl_legacy_prph_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
new file mode 100644
index 000000000000..654cf233a384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-rx.c
@@ -0,0 +1,302 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <net/mac80211.h>
33#include <asm/unaligned.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/
41/*
42 * Rx theory of operation
43 *
44 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
45 * each of which point to Receive Buffers to be filled by the NIC. These get
46 * used not only for Rx frames, but for any command response or notification
47 * from the NIC. The driver and NIC manage the Rx buffers by means
48 * of indexes into the circular buffer.
49 *
50 * Rx Queue Indexes
51 * The host/firmware share two index registers for managing the Rx buffers.
52 *
53 * The READ index maps to the first position that the firmware may be writing
54 * to -- the driver can read up to (but not including) this position and get
55 * good data.
56 * The READ index is managed by the firmware once the card is enabled.
57 *
58 * The WRITE index maps to the last position the driver has read from -- the
59 * position preceding WRITE is the last slot in which the firmware can place a packet.
60 *
61 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
62 * WRITE = READ.
63 *
64 * During initialization, the host sets up the READ queue position to the first
65 * INDEX position, and WRITE to the last (READ - 1 wrapped)
66 *
67 * When the firmware places a packet in a buffer, it will advance the READ index
68 * and fire the RX interrupt. The driver can then query the READ index and
69 * process as many packets as possible, moving the WRITE index forward as it
70 * resets the Rx queue buffers with new memory.
71 *
72 * The management in the driver is as follows:
73 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
74 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
75 * to replenish the iwl->rxq->rx_free.
76 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
77 * iwl->rxq is replenished and the READ INDEX is updated (updating the
78 * 'processed' and 'read' driver indexes as well)
79 * + A received packet is processed and handed to the kernel network stack,
80 * detached from the iwl->rxq. The driver 'processed' index is updated.
81 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
82 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
83 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
84 * were enough free buffers and RX_STALLED is set, it is cleared.
85 *
86 *
87 * Driver sequence:
88 *
89 * iwl_legacy_rx_queue_alloc() Allocates rx_free
90 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
91 * iwl_rx_queue_restock
92 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
93 * queue, updates firmware pointers, and updates
94 * the WRITE index. If insufficient rx_free buffers
95 * are available, schedules iwl_rx_replenish
96 *
97 * -- enable interrupts --
98 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
99 * READ INDEX, detaching the SKB from the pool.
100 * Moves the packet buffer from queue to rx_used.
101 * Calls iwl_rx_queue_restock to refill any empty
102 * slots.
103 * ...
104 *
105 */
106
107/**
108 * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
109 */
110int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
111{
112 int s = q->read - q->write;
113 if (s <= 0)
114 s += RX_QUEUE_SIZE;
115 /* keep some buffer to not confuse full and empty queue */
116 s -= 2;
117 if (s < 0)
118 s = 0;
119 return s;
120}
121EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
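(Worked example, for illustration only: with RX_QUEUE_SIZE = 256, read == write gives s = 0 + 256 - 2 = 254 slots available for restocking, while read == write + 2 gives s = 0; the two-slot slack is what keeps a completely full ring distinguishable from an empty one.)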
122
123/**
124 * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
125 */
126void
127iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
128 struct iwl_rx_queue *q)
129{
130 unsigned long flags;
131 u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
132 u32 reg;
133
134 spin_lock_irqsave(&q->lock, flags);
135
136 if (q->need_update == 0)
137 goto exit_unlock;
138
139 /* If power-saving is in use, make sure device is awake */
140 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
141 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
142
143 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
144 IWL_DEBUG_INFO(priv,
145 "Rx queue requesting wakeup,"
146 " GP1 = 0x%x\n", reg);
147 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
148 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
149 goto exit_unlock;
150 }
151
152 q->write_actual = (q->write & ~0x7);
153 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
154 q->write_actual);
155
156 /* Else device is assumed to be awake */
157 } else {
158 /* Device expects a multiple of 8 */
159 q->write_actual = (q->write & ~0x7);
160 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
161 q->write_actual);
162 }
163
164 q->need_update = 0;
165
166 exit_unlock:
167 spin_unlock_irqrestore(&q->lock, flags);
168}
169EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
170
171int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
172{
173 struct iwl_rx_queue *rxq = &priv->rxq;
174 struct device *dev = &priv->pci_dev->dev;
175 int i;
176
177 spin_lock_init(&rxq->lock);
178 INIT_LIST_HEAD(&rxq->rx_free);
179 INIT_LIST_HEAD(&rxq->rx_used);
180
181 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
182 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
183 GFP_KERNEL);
184 if (!rxq->bd)
185 goto err_bd;
186
187 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
188 &rxq->rb_stts_dma, GFP_KERNEL);
189 if (!rxq->rb_stts)
190 goto err_rb;
191
192 /* Fill the rx_used queue with _all_ of the Rx buffers */
193 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
194 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
195
196 /* Set us so that we have processed and used all buffers, but have
197 * not restocked the Rx queue with fresh buffers */
198 rxq->read = rxq->write = 0;
199 rxq->write_actual = 0;
200 rxq->free_count = 0;
201 rxq->need_update = 0;
202 return 0;
203
204err_rb:
205 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
206 rxq->bd_dma);
207err_bd:
208 return -ENOMEM;
209}
210EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
211
212
213void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
214 struct iwl_rx_mem_buffer *rxb)
215{
216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
217 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
218
219 if (!report->state) {
220 IWL_DEBUG_11H(priv,
221 "Spectrum Measure Notification: Start\n");
222 return;
223 }
224
225 memcpy(&priv->measure_report, report, sizeof(*report));
226 priv->measurement_status |= MEASUREMENT_READY;
227}
228EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
229
230void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
231 struct iwl_rx_packet *pkt)
232{
233 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
234 return;
235 if (iwl_legacy_is_any_associated(priv)) {
236 if (priv->cfg->ops->lib->check_plcp_health) {
237 if (!priv->cfg->ops->lib->check_plcp_health(
238 priv, pkt)) {
239 /*
240 * high plcp error detected
241 * reset Radio
242 */
243 iwl_legacy_force_reset(priv,
244 IWL_RF_RESET, false);
245 }
246 }
247 }
248}
249EXPORT_SYMBOL(iwl_legacy_recover_from_statistics);
250
251/*
252 * returns non-zero if packet should be dropped
253 */
254int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
255 struct ieee80211_hdr *hdr,
256 u32 decrypt_res,
257 struct ieee80211_rx_status *stats)
258{
259 u16 fc = le16_to_cpu(hdr->frame_control);
260
261 /*
262 * All contexts have the same setting here due to it being
263 * a module parameter, so OK to check any context.
264 */
265 if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
266 RXON_FILTER_DIS_DECRYPT_MSK)
267 return 0;
268
269 if (!(fc & IEEE80211_FCTL_PROTECTED))
270 return 0;
271
272 IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
273 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
274 case RX_RES_STATUS_SEC_TYPE_TKIP:
275 /* The uCode has got a bad phase 1 Key, pushes the packet.
276 * Decryption will be done in SW. */
277 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
278 RX_RES_STATUS_BAD_KEY_TTAK)
279 break;
280
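	/* fall through - TTAK was OK, so also run the ICV/MIC check below */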
281 case RX_RES_STATUS_SEC_TYPE_WEP:
282 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
283 RX_RES_STATUS_BAD_ICV_MIC) {
284 /* bad ICV, the packet is destroyed since the
285 * decryption is inplace, drop it */
286 IWL_DEBUG_RX(priv, "Packet destroyed\n");
287 return -1;
288 }
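	/* fall through - ICV/MIC OK, see whether hw decryption completed */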
289 case RX_RES_STATUS_SEC_TYPE_CCMP:
290 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
291 RX_RES_STATUS_DECRYPT_OK) {
292 IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
293 stats->flag |= RX_FLAG_DECRYPTED;
294 }
295 break;
296
297 default:
298 break;
299 }
300 return 0;
301}
302EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
new file mode 100644
index 000000000000..60f597f796ca
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-scan.c
@@ -0,0 +1,625 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39
40/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
41 * sending probe req. This should be set long enough to hear probe responses
42 * from more than one AP. */
43#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
44#define IWL_ACTIVE_DWELL_TIME_52 (20)
45
46#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
47#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
48
49/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
50 * Must be set longer than active dwell time.
51 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
52#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
53#define IWL_PASSIVE_DWELL_TIME_52 (10)
54#define IWL_PASSIVE_DWELL_BASE (100)
55#define IWL_CHANNEL_TUNE_TIME 5
56
57static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
58{
59 int ret;
60 struct iwl_rx_packet *pkt;
61 struct iwl_host_cmd cmd = {
62 .id = REPLY_SCAN_ABORT_CMD,
63 .flags = CMD_WANT_SKB,
64 };
65
66 /* Exit instantly with error when device is not ready
67 * to receive scan abort command or it does not perform
68 * hardware scan currently */
69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->status) ||
73 test_bit(STATUS_EXIT_PENDING, &priv->status))
74 return -EIO;
75
76 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
77 if (ret)
78 return ret;
79
80 pkt = (struct iwl_rx_packet *)cmd.reply_page;
81 if (pkt->u.status != CAN_ABORT_STATUS) {
82 /* The scan abort will return 1 for success or
83 * 2 for "failure". A failure condition can be
84 * due to simply not being in an active scan which
85 * can occur if we send the scan abort before
86 * the microcode has notified us that a scan is
87 * completed. */
88 IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
89 ret = -EIO;
90 }
91
92 iwl_legacy_free_pages(priv, cmd.reply_page);
93 return ret;
94}
95
96static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
97{
98 /* check if scan was requested from mac80211 */
99 if (priv->scan_request) {
100 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
101 ieee80211_scan_completed(priv->hw, aborted);
102 }
103
104 priv->is_internal_short_scan = false;
105 priv->scan_vif = NULL;
106 priv->scan_request = NULL;
107}
108
109void iwl_legacy_force_scan_end(struct iwl_priv *priv)
110{
111 lockdep_assert_held(&priv->mutex);
112
113 if (!test_bit(STATUS_SCANNING, &priv->status)) {
114 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
115 return;
116 }
117
118 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
119 clear_bit(STATUS_SCANNING, &priv->status);
120 clear_bit(STATUS_SCAN_HW, &priv->status);
121 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
122 iwl_legacy_complete_scan(priv, true);
123}
124
125static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
126{
127 int ret;
128
129 lockdep_assert_held(&priv->mutex);
130
131 if (!test_bit(STATUS_SCANNING, &priv->status)) {
132 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
133 return;
134 }
135
136 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
137 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
138 return;
139 }
140
141 ret = iwl_legacy_send_scan_abort(priv);
142 if (ret) {
143 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
144 iwl_legacy_force_scan_end(priv);
145 } else
146 IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
147}
148
149/**
150 * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
151 */
152int iwl_legacy_scan_cancel(struct iwl_priv *priv)
153{
154 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
155 queue_work(priv->workqueue, &priv->abort_scan);
156 return 0;
157}
158EXPORT_SYMBOL(iwl_legacy_scan_cancel);
159
160/**
161 * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
162 * @ms: amount of time to wait (in milliseconds) for scan to abort
163 *
164 */
165int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
166{
167 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
168
169 lockdep_assert_held(&priv->mutex);
170
171 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
172
173 iwl_legacy_do_scan_abort(priv);
174
175 while (time_before_eq(jiffies, timeout)) {
176 if (!test_bit(STATUS_SCAN_HW, &priv->status))
177 break;
178 msleep(20);
179 }
180
181 return test_bit(STATUS_SCAN_HW, &priv->status);
182}
183EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
184
185/* Service response to REPLY_SCAN_CMD (0x80) */
186static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
187 struct iwl_rx_mem_buffer *rxb)
188{
189#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
190 struct iwl_rx_packet *pkt = rxb_addr(rxb);
191 struct iwl_scanreq_notification *notif =
192 (struct iwl_scanreq_notification *)pkt->u.raw;
193
194 IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
195#endif
196}
197
198/* Service SCAN_START_NOTIFICATION (0x82) */
199static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
200 struct iwl_rx_mem_buffer *rxb)
201{
202 struct iwl_rx_packet *pkt = rxb_addr(rxb);
203 struct iwl_scanstart_notification *notif =
204 (struct iwl_scanstart_notification *)pkt->u.raw;
205 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
206 IWL_DEBUG_SCAN(priv, "Scan start: "
207 "%d [802.11%s] "
208 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
209 notif->channel,
210 notif->band ? "bg" : "a",
211 le32_to_cpu(notif->tsf_high),
212 le32_to_cpu(notif->tsf_low),
213 notif->status, notif->beacon_timer);
214}
215
216/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
217static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
218 struct iwl_rx_mem_buffer *rxb)
219{
220#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
221 struct iwl_rx_packet *pkt = rxb_addr(rxb);
222 struct iwl_scanresults_notification *notif =
223 (struct iwl_scanresults_notification *)pkt->u.raw;
224
225 IWL_DEBUG_SCAN(priv, "Scan ch.res: "
226 "%d [802.11%s] "
227 "(TSF: 0x%08X:%08X) - %d "
228 "elapsed=%lu usec\n",
229 notif->channel,
230 notif->band ? "bg" : "a",
231 le32_to_cpu(notif->tsf_high),
232 le32_to_cpu(notif->tsf_low),
233 le32_to_cpu(notif->statistics[0]),
234 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
235#endif
236}
237
238/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
239static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
240 struct iwl_rx_mem_buffer *rxb)
241{
242
243#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
244 struct iwl_rx_packet *pkt = rxb_addr(rxb);
245 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
246#endif
247
248 IWL_DEBUG_SCAN(priv,
249 "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
250 scan_notif->scanned_channels,
251 scan_notif->tsf_low,
252 scan_notif->tsf_high, scan_notif->status);
253
254 /* The HW is no longer scanning */
255 clear_bit(STATUS_SCAN_HW, &priv->status);
256
257 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
258 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
259 jiffies_to_msecs(jiffies - priv->scan_start));
260
261 queue_work(priv->workqueue, &priv->scan_completed);
262}
263
264void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
265{
266 /* scan handlers */
267 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
268 priv->rx_handlers[SCAN_START_NOTIFICATION] =
269 iwl_legacy_rx_scan_start_notif;
270 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
271 iwl_legacy_rx_scan_results_notif;
272 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
273 iwl_legacy_rx_scan_complete_notif;
274}
275EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
276
277inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
278 enum ieee80211_band band,
279 u8 n_probes)
280{
281 if (band == IEEE80211_BAND_5GHZ)
282 return IWL_ACTIVE_DWELL_TIME_52 +
283 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
284 else
285 return IWL_ACTIVE_DWELL_TIME_24 +
286 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
287}
288EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
289
290u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
291 enum ieee80211_band band,
292 struct ieee80211_vif *vif)
293{
294 struct iwl_rxon_context *ctx;
295 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
296 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
297 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
298
299 if (iwl_legacy_is_any_associated(priv)) {
300 /*
301 * If we're associated, we clamp the maximum passive
302 * dwell time to be 98% of the smallest beacon interval
303 * (minus 2 * channel tune time)
304 */
305 for_each_context(priv, ctx) {
306 u16 value;
307
308 if (!iwl_legacy_is_associated_ctx(ctx))
309 continue;
310 value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
311 if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
312 value = IWL_PASSIVE_DWELL_BASE;
313 value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
314 passive = min(value, passive);
315 }
316 }
317
318 return passive;
319}
320EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
321
322void iwl_legacy_init_scan_params(struct iwl_priv *priv)
323{
324 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
325 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
326 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
327 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
328 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
329}
330EXPORT_SYMBOL(iwl_legacy_init_scan_params);
331
332static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
333 struct ieee80211_vif *vif,
334 bool internal,
335 enum ieee80211_band band)
336{
337 int ret;
338
339 lockdep_assert_held(&priv->mutex);
340
341 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
342 return -EOPNOTSUPP;
343
344 cancel_delayed_work(&priv->scan_check);
345
346 if (!iwl_legacy_is_ready_rf(priv)) {
347 IWL_WARN(priv, "Request scan called when driver not ready.\n");
348 return -EIO;
349 }
350
351 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
352 IWL_DEBUG_SCAN(priv,
353 "Multiple concurrent scan requests.\n");
354 return -EBUSY;
355 }
356
357 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
358 IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
359 return -EBUSY;
360 }
361
362 IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
363 internal ? "internal short " : "");
364
365 set_bit(STATUS_SCANNING, &priv->status);
366 priv->is_internal_short_scan = internal;
367 priv->scan_start = jiffies;
368 priv->scan_band = band;
369
370 ret = priv->cfg->ops->utils->request_scan(priv, vif);
371 if (ret) {
372 clear_bit(STATUS_SCANNING, &priv->status);
373 priv->is_internal_short_scan = false;
374 return ret;
375 }
376
377 queue_delayed_work(priv->workqueue, &priv->scan_check,
378 IWL_SCAN_CHECK_WATCHDOG);
379
380 return 0;
381}
382
383int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
384 struct ieee80211_vif *vif,
385 struct cfg80211_scan_request *req)
386{
387 struct iwl_priv *priv = hw->priv;
388 int ret;
389
390 IWL_DEBUG_MAC80211(priv, "enter\n");
391
392 if (req->n_channels == 0)
393 return -EINVAL;
394
395 mutex_lock(&priv->mutex);
396
397 if (test_bit(STATUS_SCANNING, &priv->status) &&
398 !priv->is_internal_short_scan) {
399 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
400 ret = -EAGAIN;
401 goto out_unlock;
402 }
403
404 /* mac80211 will only ask for one band at a time */
405 priv->scan_request = req;
406 priv->scan_vif = vif;
407
408 /*
409 * If an internal scan is in progress, just set
410 * up the scan_request as per above.
411 */
412 if (priv->is_internal_short_scan) {
413 IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
414 ret = 0;
415 } else
416 ret = iwl_legacy_scan_initiate(priv, vif, false,
417 req->channels[0]->band);
418
419 IWL_DEBUG_MAC80211(priv, "leave\n");
420
421out_unlock:
422 mutex_unlock(&priv->mutex);
423
424 return ret;
425}
426EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
427
428/*
429 * Internal short scan: this function should only be called while associated.
430 * It will reset and tune the radio to prevent a possible RF-related problem.
431 */
432void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv)
433{
434 queue_work(priv->workqueue, &priv->start_internal_scan);
435}
436
437static void iwl_legacy_bg_start_internal_scan(struct work_struct *work)
438{
439 struct iwl_priv *priv =
440 container_of(work, struct iwl_priv, start_internal_scan);
441
442 IWL_DEBUG_SCAN(priv, "Start internal scan\n");
443
444 mutex_lock(&priv->mutex);
445
446	if (priv->is_internal_short_scan) {
447 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
448 goto unlock;
449 }
450
451 if (test_bit(STATUS_SCANNING, &priv->status)) {
452 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
453 goto unlock;
454 }
455
456 if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band))
457 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
458 unlock:
459 mutex_unlock(&priv->mutex);
460}
461
462static void iwl_legacy_bg_scan_check(struct work_struct *data)
463{
464 struct iwl_priv *priv =
465 container_of(data, struct iwl_priv, scan_check.work);
466
467 IWL_DEBUG_SCAN(priv, "Scan check work\n");
468
469 /* Since we got here, the firmware has not finished the scan and
470 * is most likely in bad shape, so we don't bother to send an
471 * abort command; just force scan complete to mac80211 */
472 mutex_lock(&priv->mutex);
473 iwl_legacy_force_scan_end(priv);
474 mutex_unlock(&priv->mutex);
475}
476
477/**
478 * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
479 */
480
481u16
482iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
483 const u8 *ta, const u8 *ies, int ie_len, int left)
484{
485 int len = 0;
486 u8 *pos = NULL;
487
488 /* Make sure there is enough space for the probe request,
489 * two mandatory IEs and the data */
490 left -= 24;
491 if (left < 0)
492 return 0;
493
494 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
495 memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
496 memcpy(frame->sa, ta, ETH_ALEN);
497 memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
498 frame->seq_ctrl = 0;
499
500 len += 24;
501
502 /* ...next IE... */
503 pos = &frame->u.probe_req.variable[0];
504
505 /* fill in our wildcard SSID IE (zero length, i.e. broadcast probe) */
506 left -= 2;
507 if (left < 0)
508 return 0;
509 *pos++ = WLAN_EID_SSID;
510 *pos++ = 0;
511
512 len += 2;
513
514 if (WARN_ON(left < ie_len))
515 return len;
516
517 if (ies && ie_len) {
518 memcpy(pos, ies, ie_len);
519 len += ie_len;
520 }
521
522 return (u16)len;
523}
524EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
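An illustrative usage sketch (not taken from the driver): a hw-specific request_scan implementation might embed the probe request into its scan command buffer roughly as below. Everything here other than iwl_legacy_fill_probe_req() and the cfg80211 scan request fields is a placeholder.

static u16 __maybe_unused
iwl_scan_probe_len_sketch(struct iwl_priv *priv, struct ieee80211_vif *vif,
			  u8 *buf, int buf_len)
{
	const u8 *ies = priv->scan_request ? priv->scan_request->ie : NULL;
	int ie_len = priv->scan_request ? priv->scan_request->ie_len : 0;

	/* Returns the number of bytes written into buf (0 if it won't fit) */
	return iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)buf,
					 vif->addr, ies, ie_len, buf_len);
}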
525
526static void iwl_legacy_bg_abort_scan(struct work_struct *work)
527{
528 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
529
530 IWL_DEBUG_SCAN(priv, "Abort scan work\n");
531
532 /* We keep the scan_check work queued in case the firmware does not
533 * report back a scan completed notification */
534 mutex_lock(&priv->mutex);
535 iwl_legacy_scan_cancel_timeout(priv, 200);
536 mutex_unlock(&priv->mutex);
537}
538
539static void iwl_legacy_bg_scan_completed(struct work_struct *work)
540{
541 struct iwl_priv *priv =
542 container_of(work, struct iwl_priv, scan_completed);
543 bool aborted;
544
545 IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
546 priv->is_internal_short_scan ? "internal short " : "");
547
548 cancel_delayed_work(&priv->scan_check);
549
550 mutex_lock(&priv->mutex);
551
552 aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
553 if (aborted)
554 IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
555
556 if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
557 IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
558 goto out_settings;
559 }
560
561 if (priv->is_internal_short_scan && !aborted) {
562 int err;
563
564 /* Check if mac80211 requested scan during our internal scan */
565 if (priv->scan_request == NULL)
566 goto out_complete;
567
568 /* If so request a new scan */
569 err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false,
570 priv->scan_request->channels[0]->band);
571 if (err) {
572 IWL_DEBUG_SCAN(priv,
573 "failed to initiate pending scan: %d\n", err);
574 aborted = true;
575 goto out_complete;
576 }
577
578 goto out;
579 }
580
581out_complete:
582 iwl_legacy_complete_scan(priv, aborted);
583
584out_settings:
585 /* Can we still talk to firmware ? */
586 if (!iwl_legacy_is_ready_rf(priv))
587 goto out;
588
589 /*
590 * We do not commit power settings while scan is pending,
591 * do it now if the settings changed.
592 */
593 iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next,
594 false);
595 iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
596
597 priv->cfg->ops->utils->post_scan(priv);
598
599out:
600 mutex_unlock(&priv->mutex);
601}
602
603void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
604{
605 INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
606 INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
607 INIT_WORK(&priv->start_internal_scan,
608 iwl_legacy_bg_start_internal_scan);
609 INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
610}
611EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
612
613void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
614{
615 cancel_work_sync(&priv->start_internal_scan);
616 cancel_work_sync(&priv->abort_scan);
617 cancel_work_sync(&priv->scan_completed);
618
619 if (cancel_delayed_work_sync(&priv->scan_check)) {
620 mutex_lock(&priv->mutex);
621 iwl_legacy_force_scan_end(priv);
622 mutex_unlock(&priv->mutex);
623 }
624}
625EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
new file mode 100644
index 000000000000..9f70a4723103
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -0,0 +1,92 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_spectrum_h__
30#define __iwl_legacy_spectrum_h__
31enum { /* ieee80211_basic_report.map */
32 IEEE80211_BASIC_MAP_BSS = (1 << 0),
33 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
34 IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
35 IEEE80211_BASIC_MAP_RADAR = (1 << 3),
36 IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
37 /* Bits 5-7 are reserved */
38
39};
40struct ieee80211_basic_report {
41 u8 channel;
42 __le64 start_time;
43 __le16 duration;
44 u8 map;
45} __packed;
46
47enum { /* ieee80211_measurement_request.mode */
48 /* Bit 0 is reserved */
49 IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
50 IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
51 IEEE80211_MEASUREMENT_REPORT = (1 << 3),
52 /* Bits 4-7 are reserved */
53};
54
55enum {
56 IEEE80211_REPORT_BASIC = 0, /* required */
57 IEEE80211_REPORT_CCA = 1, /* optional */
58 IEEE80211_REPORT_RPI = 2, /* optional */
59 /* 3-255 reserved */
60};
61
62struct ieee80211_measurement_params {
63 u8 channel;
64 __le64 start_time;
65 __le16 duration;
66} __packed;
67
68struct ieee80211_info_element {
69 u8 id;
70 u8 len;
71 u8 data[0];
72} __packed;
73
74struct ieee80211_measurement_request {
75 struct ieee80211_info_element ie;
76 u8 token;
77 u8 mode;
78 u8 type;
79 struct ieee80211_measurement_params params[0];
80} __packed;
81
82struct ieee80211_measurement_report {
83 struct ieee80211_info_element ie;
84 u8 token;
85 u8 mode;
86 u8 type;
87 union {
88 struct ieee80211_basic_report basic[0];
89 } u;
90} __packed;
91
92#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
new file mode 100644
index 000000000000..47c9da3834ea
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -0,0 +1,816 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/lockdep.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38
39/* priv->sta_lock must be held */
40static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
41{
42
43 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
44 IWL_ERR(priv,
45 "ACTIVATE a non DRIVER active station id %u addr %pM\n",
46 sta_id, priv->stations[sta_id].sta.sta.addr);
47
48 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
49 IWL_DEBUG_ASSOC(priv,
50 "STA id %u addr %pM already present"
51 " in uCode (according to driver)\n",
52 sta_id, priv->stations[sta_id].sta.sta.addr);
53 } else {
54 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
55 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
56 sta_id, priv->stations[sta_id].sta.sta.addr);
57 }
58}
59
60static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
61 struct iwl_legacy_addsta_cmd *addsta,
62 struct iwl_rx_packet *pkt,
63 bool sync)
64{
65 u8 sta_id = addsta->sta.sta_id;
66 unsigned long flags;
67 int ret = -EIO;
68
69 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
70 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
71 pkt->hdr.flags);
72 return ret;
73 }
74
75 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
76 sta_id);
77
78 spin_lock_irqsave(&priv->sta_lock, flags);
79
80 switch (pkt->u.add_sta.status) {
81 case ADD_STA_SUCCESS_MSK:
82 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
83 iwl_legacy_sta_ucode_activate(priv, sta_id);
84 ret = 0;
85 break;
86 case ADD_STA_NO_ROOM_IN_TABLE:
87 IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
88 sta_id);
89 break;
90 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
91 IWL_ERR(priv,
92 "Adding station %d failed, no block ack resource.\n",
93 sta_id);
94 break;
95 case ADD_STA_MODIFY_NON_EXIST_STA:
96 IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
97 sta_id);
98 break;
99 default:
100 IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
101 pkt->u.add_sta.status);
102 break;
103 }
104
105 IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
106 priv->stations[sta_id].sta.mode ==
107 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
108 sta_id, priv->stations[sta_id].sta.sta.addr);
109
110 /*
111 * XXX: The MAC address in the command buffer is often changed from
112 * the original sent to the device. That is, the MAC address
113 * written to the command buffer often is not the same MAC address
114 * read from the command buffer when the command returns. This
115 * issue has not yet been resolved and this debugging is left to
116 * observe the problem.
117 */
118 IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
119 priv->stations[sta_id].sta.mode ==
120 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
121 addsta->sta.addr);
122 spin_unlock_irqrestore(&priv->sta_lock, flags);
123
124 return ret;
125}
126
127static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
128 struct iwl_device_cmd *cmd,
129 struct iwl_rx_packet *pkt)
130{
131 struct iwl_legacy_addsta_cmd *addsta =
132 (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
133
134 iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
135
136}
137
138int iwl_legacy_send_add_sta(struct iwl_priv *priv,
139 struct iwl_legacy_addsta_cmd *sta, u8 flags)
140{
141 struct iwl_rx_packet *pkt = NULL;
142 int ret = 0;
143 u8 data[sizeof(*sta)];
144 struct iwl_host_cmd cmd = {
145 .id = REPLY_ADD_STA,
146 .flags = flags,
147 .data = data,
148 };
149 u8 sta_id __maybe_unused = sta->sta.sta_id;
150
151 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
152 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
153
154 if (flags & CMD_ASYNC)
155 cmd.callback = iwl_legacy_add_sta_callback;
156 else {
157 cmd.flags |= CMD_WANT_SKB;
158 might_sleep();
159 }
160
161 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
162 ret = iwl_legacy_send_cmd(priv, &cmd);
163
164 if (ret || (flags & CMD_ASYNC))
165 return ret;
166
167 if (ret == 0) {
168 pkt = (struct iwl_rx_packet *)cmd.reply_page;
169 ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
170 }
171 iwl_legacy_free_pages(priv, cmd.reply_page);
172
173 return ret;
174}
175EXPORT_SYMBOL(iwl_legacy_send_add_sta);
176
177static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
178 struct ieee80211_sta *sta,
179 struct iwl_rxon_context *ctx)
180{
181 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
182 __le32 sta_flags;
183 u8 mimo_ps_mode;
184
185 if (!sta || !sta_ht_inf->ht_supported)
186 goto done;
187
188 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
189 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
190 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
191 "static" :
192 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
193 "dynamic" : "disabled");
194
195 sta_flags = priv->stations[index].sta.station_flags;
196
197 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
198
199 switch (mimo_ps_mode) {
200 case WLAN_HT_CAP_SM_PS_STATIC:
201 sta_flags |= STA_FLG_MIMO_DIS_MSK;
202 break;
203 case WLAN_HT_CAP_SM_PS_DYNAMIC:
204 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
205 break;
206 case WLAN_HT_CAP_SM_PS_DISABLED:
207 break;
208 default:
209 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
210 break;
211 }
212
213 sta_flags |= cpu_to_le32(
214 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
215
216 sta_flags |= cpu_to_le32(
217 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
218
219 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
220 sta_flags |= STA_FLG_HT40_EN_MSK;
221 else
222 sta_flags &= ~STA_FLG_HT40_EN_MSK;
223
224 priv->stations[index].sta.station_flags = sta_flags;
225 done:
226 return;
227}
228
229/**
230 * iwl_legacy_prep_station - Prepare station information for addition
231 *
232 * should be called with sta_lock held
233 */
234u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
235 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
236{
237 struct iwl_station_entry *station;
238 int i;
239 u8 sta_id = IWL_INVALID_STATION;
240 u16 rate;
241
242 if (is_ap)
243 sta_id = ctx->ap_sta_id;
244 else if (is_broadcast_ether_addr(addr))
245 sta_id = ctx->bcast_sta_id;
246 else
247 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
248 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
249 addr)) {
250 sta_id = i;
251 break;
252 }
253
254 if (!priv->stations[i].used &&
255 sta_id == IWL_INVALID_STATION)
256 sta_id = i;
257 }
258
259 /*
260 * These two conditions have the same outcome, but keep them
261 * separate
262 */
263 if (unlikely(sta_id == IWL_INVALID_STATION))
264 return sta_id;
265
266 /*
267 * uCode is not able to deal with multiple requests to add a
268 * station. Keep track if one is in progress so that we do not send
269 * another.
270 */
271 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
272 IWL_DEBUG_INFO(priv,
273 "STA %d already in process of being added.\n",
274 sta_id);
275 return sta_id;
276 }
277
278 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
279 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
280 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
281 IWL_DEBUG_ASSOC(priv,
282 "STA %d (%pM) already added, not adding again.\n",
283 sta_id, addr);
284 return sta_id;
285 }
286
287 station = &priv->stations[sta_id];
288 station->used = IWL_STA_DRIVER_ACTIVE;
289 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
290 sta_id, addr);
291 priv->num_stations++;
292
293 /* Set up the REPLY_ADD_STA command to send to device */
294 memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
295 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
296 station->sta.mode = 0;
297 station->sta.sta.sta_id = sta_id;
298 station->sta.station_flags = ctx->station_flags;
299 station->ctxid = ctx->ctxid;
300
301 if (sta) {
302 struct iwl_station_priv_common *sta_priv;
303
304 sta_priv = (void *)sta->drv_priv;
305 sta_priv->ctx = ctx;
306 }
307
308 /*
309 * OK to call unconditionally, since local stations (IBSS BSSID
310 * STA and broadcast STA) pass in a NULL sta, and mac80211
311 * doesn't allow HT IBSS.
312 */
313 iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
314
315 /* 3945 only */
316 rate = (priv->band == IEEE80211_BAND_5GHZ) ?
317 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
318 /* Turn on both antennas for the station... */
319 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
320
321 return sta_id;
322
323}
324EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
325
326#define STA_WAIT_TIMEOUT (HZ/2)
327
328/**
329 * iwl_legacy_add_station_common - Add a station to the driver and device tables
330 */
331int
332iwl_legacy_add_station_common(struct iwl_priv *priv,
333 struct iwl_rxon_context *ctx,
334 const u8 *addr, bool is_ap,
335 struct ieee80211_sta *sta, u8 *sta_id_r)
336{
337 unsigned long flags_spin;
338 int ret = 0;
339 u8 sta_id;
340 struct iwl_legacy_addsta_cmd sta_cmd;
341
342 *sta_id_r = 0;
343 spin_lock_irqsave(&priv->sta_lock, flags_spin);
344 sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
345 if (sta_id == IWL_INVALID_STATION) {
346 IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
347 addr);
348 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
349 return -EINVAL;
350 }
351
352 /*
353 * uCode is not able to deal with multiple requests to add a
354 * station. Keep track if one is in progress so that we do not send
355 * another.
356 */
357 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
358 IWL_DEBUG_INFO(priv,
359 "STA %d already in process of being added.\n",
360 sta_id);
361 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
362 return -EEXIST;
363 }
364
365 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
366 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
367 IWL_DEBUG_ASSOC(priv,
368 "STA %d (%pM) already added, not adding again.\n",
369 sta_id, addr);
370 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
371 return -EEXIST;
372 }
373
374 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
375 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
376 sizeof(struct iwl_legacy_addsta_cmd));
377 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
378
379 /* Add station to device's station table */
380 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
381 if (ret) {
382 spin_lock_irqsave(&priv->sta_lock, flags_spin);
383 IWL_ERR(priv, "Adding station %pM failed.\n",
384 priv->stations[sta_id].sta.sta.addr);
385 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
386 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
387 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
388 }
389 *sta_id_r = sta_id;
390 return ret;
391}
392EXPORT_SYMBOL(iwl_legacy_add_station_common);
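For orientation, a minimal sketch of how a caller might use iwl_legacy_add_station_common(); the surrounding context (a mac80211 sta_add style path) and the local variables are hypothetical, not part of this diff:

	/* Hypothetical caller: add a peer for the BSS context and keep the
	 * station table index the helper hands back. */
	u8 sta_id;
	int ret;

	ret = iwl_legacy_add_station_common(priv,
			&priv->contexts[IWL_RXON_CTX_BSS],
			sta->addr, false, sta, &sta_id);
	if (ret)
		IWL_ERR(priv, "Unable to add station %pM\n", sta->addr);
	else
		IWL_DEBUG_INFO(priv, "Station %pM added at index %d\n",
			       sta->addr, sta_id);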
393
394/**
395 * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
396 *
397 * priv->sta_lock must be held
398 */
399static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
400{
401	/* Ucode must be active and driver must be inactive */
402 if ((priv->stations[sta_id].used &
403 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
404 IWL_STA_UCODE_ACTIVE)
405 IWL_ERR(priv, "removed non active STA %u\n", sta_id);
406
407 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
408
409 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
410 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
411}
412
413static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
414 const u8 *addr, int sta_id,
415 bool temporary)
416{
417 struct iwl_rx_packet *pkt;
418 int ret;
419
420 unsigned long flags_spin;
421 struct iwl_rem_sta_cmd rm_sta_cmd;
422
423 struct iwl_host_cmd cmd = {
424 .id = REPLY_REMOVE_STA,
425 .len = sizeof(struct iwl_rem_sta_cmd),
426 .flags = CMD_SYNC,
427 .data = &rm_sta_cmd,
428 };
429
430 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
431 rm_sta_cmd.num_sta = 1;
432 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
433
434 cmd.flags |= CMD_WANT_SKB;
435
436 ret = iwl_legacy_send_cmd(priv, &cmd);
437
438 if (ret)
439 return ret;
440
441 pkt = (struct iwl_rx_packet *)cmd.reply_page;
442 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
443 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
444 pkt->hdr.flags);
445 ret = -EIO;
446 }
447
448 if (!ret) {
449 switch (pkt->u.rem_sta.status) {
450 case REM_STA_SUCCESS_MSK:
451 if (!temporary) {
452 spin_lock_irqsave(&priv->sta_lock, flags_spin);
453 iwl_legacy_sta_ucode_deactivate(priv, sta_id);
454 spin_unlock_irqrestore(&priv->sta_lock,
455 flags_spin);
456 }
457 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
458 break;
459 default:
460 ret = -EIO;
461 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
462 break;
463 }
464 }
465 iwl_legacy_free_pages(priv, cmd.reply_page);
466
467 return ret;
468}
469
470/**
471 * iwl_legacy_remove_station - Remove driver's knowledge of station.
472 */
473int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
474 const u8 *addr)
475{
476 unsigned long flags;
477
478 if (!iwl_legacy_is_ready(priv)) {
479 IWL_DEBUG_INFO(priv,
480 "Unable to remove station %pM, device not ready.\n",
481 addr);
482 /*
483 * It is typical for stations to be removed when we are
484 * going down. Return success since device will be down
485 * soon anyway
486 */
487 return 0;
488 }
489
490 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
491 sta_id, addr);
492
493 if (WARN_ON(sta_id == IWL_INVALID_STATION))
494 return -EINVAL;
495
496 spin_lock_irqsave(&priv->sta_lock, flags);
497
498 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
499 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
500 addr);
501 goto out_err;
502 }
503
504 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
505 IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
506 addr);
507 goto out_err;
508 }
509
510 if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
511 kfree(priv->stations[sta_id].lq);
512 priv->stations[sta_id].lq = NULL;
513 }
514
515 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
516
517 priv->num_stations--;
518
519 BUG_ON(priv->num_stations < 0);
520
521 spin_unlock_irqrestore(&priv->sta_lock, flags);
522
523 return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
524out_err:
525 spin_unlock_irqrestore(&priv->sta_lock, flags);
526 return -EINVAL;
527}
528EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
529
530/**
531 * iwl_legacy_clear_ucode_stations - clear ucode station table bits
532 *
533 * This function clears all the bits in the driver indicating
534 * which stations are active in the ucode. Call when something
535 * other than explicit station management would cause this in
536 * the ucode, e.g. unassociated RXON.
537 */
538void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
539 struct iwl_rxon_context *ctx)
540{
541 int i;
542 unsigned long flags_spin;
543 bool cleared = false;
544
545 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
546
547 spin_lock_irqsave(&priv->sta_lock, flags_spin);
548 for (i = 0; i < priv->hw_params.max_stations; i++) {
549 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
550 continue;
551
552 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
553 IWL_DEBUG_INFO(priv,
554 "Clearing ucode active for station %d\n", i);
555 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
556 cleared = true;
557 }
558 }
559 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
560
561 if (!cleared)
562 IWL_DEBUG_INFO(priv,
563 "No active stations found to be cleared\n");
564}
565EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
566
567/**
568 * iwl_legacy_restore_stations() - Restore driver known stations to device
569 *
570 * All stations considered active by the driver, but not present in the
571 * ucode, are restored.
572 *
573 * Function sleeps.
574 */
575void
576iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
577{
578 struct iwl_legacy_addsta_cmd sta_cmd;
579 struct iwl_link_quality_cmd lq;
580 unsigned long flags_spin;
581 int i;
582 bool found = false;
583 int ret;
584 bool send_lq;
585
586 if (!iwl_legacy_is_ready(priv)) {
587 IWL_DEBUG_INFO(priv,
588 "Not ready yet, not restoring any stations.\n");
589 return;
590 }
591
592 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
593 spin_lock_irqsave(&priv->sta_lock, flags_spin);
594 for (i = 0; i < priv->hw_params.max_stations; i++) {
595 if (ctx->ctxid != priv->stations[i].ctxid)
596 continue;
597 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
598 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
599 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
600 priv->stations[i].sta.sta.addr);
601 priv->stations[i].sta.mode = 0;
602 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
603 found = true;
604 }
605 }
606
607 for (i = 0; i < priv->hw_params.max_stations; i++) {
608 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
609 memcpy(&sta_cmd, &priv->stations[i].sta,
610 sizeof(struct iwl_legacy_addsta_cmd));
611 send_lq = false;
612 if (priv->stations[i].lq) {
613 memcpy(&lq, priv->stations[i].lq,
614 sizeof(struct iwl_link_quality_cmd));
615 send_lq = true;
616 }
617 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
618 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
619 if (ret) {
620 spin_lock_irqsave(&priv->sta_lock, flags_spin);
621 IWL_ERR(priv, "Adding station %pM failed.\n",
622 priv->stations[i].sta.sta.addr);
623 priv->stations[i].used &=
624 ~IWL_STA_DRIVER_ACTIVE;
625 priv->stations[i].used &=
626 ~IWL_STA_UCODE_INPROGRESS;
627 spin_unlock_irqrestore(&priv->sta_lock,
628 flags_spin);
629 }
630 /*
631 * Rate scaling has already been initialized, send
632 * current LQ command
633 */
634 if (send_lq)
635 iwl_legacy_send_lq_cmd(priv, ctx, &lq,
636 CMD_SYNC, true);
637 spin_lock_irqsave(&priv->sta_lock, flags_spin);
638 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
639 }
640 }
641
642 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
643 if (!found)
644 IWL_DEBUG_INFO(priv, "Restoring all known stations"
645 " .... no stations to be restored.\n");
646 else
647 IWL_DEBUG_INFO(priv, "Restoring all known stations"
648 " .... complete.\n");
649}
650EXPORT_SYMBOL(iwl_legacy_restore_stations);
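The clear/restore helpers above are normally used as a pair around an RXON change; a rough sketch of that pairing, with the surrounding commit-RXON context assumed rather than shown in this diff:

	/* Hypothetical commit-RXON path: an unassociated RXON wipes the
	 * device's station table, so first drop the driver's "ucode active"
	 * bits, then re-add every station the driver still considers active. */
	iwl_legacy_clear_ucode_stations(priv, ctx);
	iwl_legacy_restore_stations(priv, ctx);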
651
652int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
653{
654 int i;
655
656 for (i = 0; i < priv->sta_key_max_num; i++)
657 if (!test_and_set_bit(i, &priv->ucode_key_table))
658 return i;
659
660 return WEP_INVALID_OFFSET;
661}
662EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
663
664void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
665{
666 unsigned long flags;
667 int i;
668
669 spin_lock_irqsave(&priv->sta_lock, flags);
670 for (i = 0; i < priv->hw_params.max_stations; i++) {
671 if (!(priv->stations[i].used & IWL_STA_BCAST))
672 continue;
673
674 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
675 priv->num_stations--;
676 BUG_ON(priv->num_stations < 0);
677 kfree(priv->stations[i].lq);
678 priv->stations[i].lq = NULL;
679 }
680 spin_unlock_irqrestore(&priv->sta_lock, flags);
681}
682EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
683
684#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
685static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
686 struct iwl_link_quality_cmd *lq)
687{
688 int i;
689 IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
690 IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
691 lq->general_params.single_stream_ant_msk,
692 lq->general_params.dual_stream_ant_msk);
693
694 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
695 IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
696 i, lq->rs_table[i].rate_n_flags);
697}
698#else
699static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
700 struct iwl_link_quality_cmd *lq)
701{
702}
703#endif
704
705/**
706 * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
707 *
708 * It sometimes happens that an HT rate has been in use and we
709 * lose connectivity with the AP; mac80211 will then first tell us that the
710 * current channel is not HT anymore before removing the station. In such a
711 * scenario the RXON flags will be updated to indicate we are no longer
712 * communicating HT, but the LQ command may still contain HT rates.
713 * Test for this to prevent the driver from sending an LQ command between
714 * the time the RXON flags are updated and the LQ command itself is updated.
715 */
716static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
717 struct iwl_rxon_context *ctx,
718 struct iwl_link_quality_cmd *lq)
719{
720 int i;
721
722 if (ctx->ht.enabled)
723 return true;
724
725 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
726 ctx->active.channel);
727 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
728 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
729 RATE_MCS_HT_MSK) {
730 IWL_DEBUG_INFO(priv,
731 "index %d of LQ expects HT channel\n",
732 i);
733 return false;
734 }
735 }
736 return true;
737}
738
739/**
740 * iwl_legacy_send_lq_cmd() - Send link quality command
741 * @init: This command is sent as part of station initialization right
742 * after station has been added.
743 *
744 * The link quality command is sent as the last step of station creation.
745 * This is the special case in which init is set and we call a callback in
746 * this case to clear the state indicating that station creation is in
747 * progress.
748 */
749int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
750 struct iwl_link_quality_cmd *lq, u8 flags, bool init)
751{
752 int ret = 0;
753 unsigned long flags_spin;
754
755 struct iwl_host_cmd cmd = {
756 .id = REPLY_TX_LINK_QUALITY_CMD,
757 .len = sizeof(struct iwl_link_quality_cmd),
758 .flags = flags,
759 .data = lq,
760 };
761
762 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
763 return -EINVAL;
764
765
766 spin_lock_irqsave(&priv->sta_lock, flags_spin);
767 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
768 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
769 return -EINVAL;
770 }
771 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
772
773 iwl_legacy_dump_lq_cmd(priv, lq);
774 BUG_ON(init && (cmd.flags & CMD_ASYNC));
775
776 if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
777 ret = iwl_legacy_send_cmd(priv, &cmd);
778 else
779 ret = -EINVAL;
780
781 if (cmd.flags & CMD_ASYNC)
782 return ret;
783
784 if (init) {
785 IWL_DEBUG_INFO(priv, "init LQ command complete,"
786 " clearing sta addition status for sta %d\n",
787 lq->sta_id);
788 spin_lock_irqsave(&priv->sta_lock, flags_spin);
789 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
790 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
791 }
792 return ret;
793}
794EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
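A short sketch of the two ways the @init flag described above is typically used; the lq table itself is assumed to come from the rate-scaling code and is not built here:

	/* Right after station creation: synchronous, init = true, so the
	 * IWL_STA_UCODE_INPROGRESS bit is cleared once the command completes. */
	iwl_legacy_send_lq_cmd(priv, ctx, lq, CMD_SYNC, true);

	/* Later rate-scaling updates: may go out asynchronously, init = false. */
	iwl_legacy_send_lq_cmd(priv, ctx, lq, CMD_ASYNC, false);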
795
796int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
797 struct ieee80211_vif *vif,
798 struct ieee80211_sta *sta)
799{
800 struct iwl_priv *priv = hw->priv;
801 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
802 int ret;
803
804 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
805 sta->addr);
806 mutex_lock(&priv->mutex);
807 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
808 sta->addr);
809 ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
810 if (ret)
811 IWL_ERR(priv, "Error removing station %pM\n",
812 sta->addr);
813 mutex_unlock(&priv->mutex);
814 return ret;
815}
816EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
new file mode 100644
index 000000000000..67bd75fe01a1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.h
@@ -0,0 +1,148 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_legacy_sta_h__
30#define __iwl_legacy_sta_h__
31
32#include "iwl-dev.h"
33
34#define HW_KEY_DYNAMIC 0
35#define HW_KEY_DEFAULT 1
36
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
40 being activated */
41#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
42 (this is for the IBSS BSSID stations) */
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44
45
46void iwl_legacy_restore_stations(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx);
48void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
49 struct iwl_rxon_context *ctx);
50void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
51int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
52int iwl_legacy_send_add_sta(struct iwl_priv *priv,
53 struct iwl_legacy_addsta_cmd *sta, u8 flags);
54int iwl_legacy_add_station_common(struct iwl_priv *priv,
55 struct iwl_rxon_context *ctx,
56 const u8 *addr, bool is_ap,
57 struct ieee80211_sta *sta, u8 *sta_id_r);
58int iwl_legacy_remove_station(struct iwl_priv *priv,
59 const u8 sta_id,
60 const u8 *addr);
61int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
62 struct ieee80211_vif *vif,
63 struct ieee80211_sta *sta);
64
65u8 iwl_legacy_prep_station(struct iwl_priv *priv,
66 struct iwl_rxon_context *ctx,
67 const u8 *addr, bool is_ap,
68 struct ieee80211_sta *sta);
69
70int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
71 struct iwl_rxon_context *ctx,
72 struct iwl_link_quality_cmd *lq,
73 u8 flags, bool init);
74
75/**
76 * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
77 * @priv: iwl priv struct
78 *
79 * This is called during iwl_down() to make sure that, in case
80 * we're coming here from a hardware restart, mac80211 will be
81 * able to reconfigure stations -- if we're getting here in the
82 * normal down flow, the stations will already be cleared.
83 */
84static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
85{
86 unsigned long flags;
87 struct iwl_rxon_context *ctx;
88
89 spin_lock_irqsave(&priv->sta_lock, flags);
90 memset(priv->stations, 0, sizeof(priv->stations));
91 priv->num_stations = 0;
92
93 priv->ucode_key_table = 0;
94
95 for_each_context(priv, ctx) {
96 /*
97 * Remove all key information that is not stored as part
98 * of station information since mac80211 may not have had
99 * a chance to remove all the keys. When device is
100 * reconfigured by mac80211 after an error all keys will
101 * be reconfigured.
102 */
103 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
104 ctx->key_mapping_keys = 0;
105 }
106
107 spin_unlock_irqrestore(&priv->sta_lock, flags);
108}
109
110static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
111{
112 if (WARN_ON(!sta))
113 return IWL_INVALID_STATION;
114
115 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
116}
117
118/**
119 * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
120 * @priv: iwl priv
121 * @context: the current context
122 * @sta: mac80211 station
123 *
124 * In certain circumstances mac80211 passes a station pointer
125 * that may be %NULL, for example during TX or key setup. In
126 * that case, we need to use the broadcast station, so this
127 * inline wraps that pattern.
128 */
129static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
130 struct iwl_rxon_context *context,
131 struct ieee80211_sta *sta)
132{
133 int sta_id;
134
135 if (!sta)
136 return context->bcast_sta_id;
137
138 sta_id = iwl_legacy_sta_id(sta);
139
140 /*
141 * mac80211 should not be passing a partially
142 * initialised station!
143 */
144 WARN_ON(sta_id == IWL_INVALID_STATION);
145
146 return sta_id;
147}
148#endif /* __iwl_legacy_sta_h__ */
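The NULL-station pattern this helper wraps shows up in the TX path; a condensed example of the shape (the 3945 TX code later in this series follows the same pattern):

	/* Resolve the destination: a real peer if mac80211 handed us one,
	 * otherwise fall back to the context's broadcast station. */
	sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx,
						info->control.sta);
	if (sta_id == IWL_INVALID_STATION)
		goto drop;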
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
new file mode 100644
index 000000000000..a227773cb384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -0,0 +1,660 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <net/mac80211.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40
41/**
42 * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
43 */
44void
45iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
46{
47 u32 reg = 0;
48 int txq_id = txq->q.id;
49
50 if (txq->need_update == 0)
51 return;
52
53 /* if we're trying to save power */
54 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
55 /* wake up nic if it's powered down ...
56 * uCode will wake up, and interrupt us again, so next
57 * time we'll skip this part. */
58 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
59
60 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
61 IWL_DEBUG_INFO(priv,
62 "Tx queue %d requesting wakeup,"
63 " GP1 = 0x%x\n", txq_id, reg);
64 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
65 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
66 return;
67 }
68
69 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
70 txq->q.write_ptr | (txq_id << 8));
71
72 /*
73 * else not in power-save mode,
74 * uCode will never sleep when we're
75 * trying to tx (during RFKILL, we're not trying to tx).
76 */
77 } else
78 iwl_write32(priv, HBUS_TARG_WRPTR,
79 txq->q.write_ptr | (txq_id << 8));
80 txq->need_update = 0;
81}
82EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
83
84/**
85 * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
86 */
87void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
88{
89 struct iwl_tx_queue *txq = &priv->txq[txq_id];
90 struct iwl_queue *q = &txq->q;
91
92 if (q->n_bd == 0)
93 return;
94
95 while (q->write_ptr != q->read_ptr) {
96 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
97 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
98 }
99}
100EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
101
102/**
103 * iwl_legacy_tx_queue_free - Deallocate DMA queue.
104 * @txq: Transmit queue to deallocate.
105 *
106 * Empty queue by removing and destroying all BD's.
107 * Free all buffers.
108 * 0-fill, but do not free "txq" descriptor structure.
109 */
110void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
111{
112 struct iwl_tx_queue *txq = &priv->txq[txq_id];
113 struct device *dev = &priv->pci_dev->dev;
114 int i;
115
116 iwl_legacy_tx_queue_unmap(priv, txq_id);
117
118 /* De-alloc array of command/tx buffers */
119 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
120 kfree(txq->cmd[i]);
121
122 /* De-alloc circular buffer of TFDs */
123 if (txq->q.n_bd)
124 dma_free_coherent(dev, priv->hw_params.tfd_size *
125 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
126
127 /* De-alloc array of per-TFD driver data */
128 kfree(txq->txb);
129 txq->txb = NULL;
130
131 /* deallocate arrays */
132 kfree(txq->cmd);
133 kfree(txq->meta);
134 txq->cmd = NULL;
135 txq->meta = NULL;
136
137 /* 0-fill queue descriptor structure */
138 memset(txq, 0, sizeof(*txq));
139}
140EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
141
142/**
143 * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
144 */
145void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
146{
147 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
148 struct iwl_queue *q = &txq->q;
149 bool huge = false;
150 int i;
151
152 if (q->n_bd == 0)
153 return;
154
155 while (q->read_ptr != q->write_ptr) {
156 /* we have no way to tell if it is a huge cmd ATM */
157 i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
158
159 if (txq->meta[i].flags & CMD_SIZE_HUGE)
160 huge = true;
161 else
162 pci_unmap_single(priv->pci_dev,
163 dma_unmap_addr(&txq->meta[i], mapping),
164 dma_unmap_len(&txq->meta[i], len),
165 PCI_DMA_BIDIRECTIONAL);
166
167 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
168 }
169
170 if (huge) {
171 i = q->n_window;
172 pci_unmap_single(priv->pci_dev,
173 dma_unmap_addr(&txq->meta[i], mapping),
174 dma_unmap_len(&txq->meta[i], len),
175 PCI_DMA_BIDIRECTIONAL);
176 }
177}
178EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
179
180/**
181 * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
182 * @txq: Transmit queue to deallocate.
183 *
184 * Empty queue by removing and destroying all BD's.
185 * Free all buffers.
186 * 0-fill, but do not free "txq" descriptor structure.
187 */
188void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
189{
190 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
191 struct device *dev = &priv->pci_dev->dev;
192 int i;
193
194 iwl_legacy_cmd_queue_unmap(priv);
195
196 /* De-alloc array of command/tx buffers */
197 for (i = 0; i <= TFD_CMD_SLOTS; i++)
198 kfree(txq->cmd[i]);
199
200 /* De-alloc circular buffer of TFDs */
201 if (txq->q.n_bd)
202 dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
203 txq->tfds, txq->q.dma_addr);
204
205 /* deallocate arrays */
206 kfree(txq->cmd);
207 kfree(txq->meta);
208 txq->cmd = NULL;
209 txq->meta = NULL;
210
211 /* 0-fill queue descriptor structure */
212 memset(txq, 0, sizeof(*txq));
213}
214EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
215
216/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
217 * DMA services
218 *
219 * Theory of operation
220 *
221 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
222 * of buffer descriptors, each of which points to one or more data buffers for
223 * the device to read from or fill. Driver and device exchange status of each
224 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
225 * entries in each circular buffer, to protect against confusing empty and full
226 * queue states.
227 *
228 * The device reads or writes the data in the queues via the device's several
229 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
230 *
231 * For Tx queues, there are low mark and high mark limits. If, after queuing
232 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
233 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
234 * > high mark, the Tx queue is resumed.
235 *
236 * See more detailed info in iwl-4965-hw.h.
237 ***************************************************/
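The read/write pointer handling described above leans on the wrap helpers from iwl-helpers.h; a minimal sketch of how such helpers can be written, consistent with the power-of-two queue sizes required below (illustrative only, the real definitions live in iwl-helpers.h and are not part of this hunk):

static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
{
	/* n_bd is a power of two, so masking wraps the index to 0 */
	return ++index & (n_bd - 1);
}

static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
{
	/* same mask trick wraps a step below 0 back to n_bd - 1 */
	return --index & (n_bd - 1);
}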
238
239int iwl_legacy_queue_space(const struct iwl_queue *q)
240{
241 int s = q->read_ptr - q->write_ptr;
242
243 if (q->read_ptr > q->write_ptr)
244 s -= q->n_bd;
245
246 if (s <= 0)
247 s += q->n_window;
248 /* keep some reserve to not confuse empty and full situations */
249 s -= 2;
250 if (s < 0)
251 s = 0;
252 return s;
253}
254EXPORT_SYMBOL(iwl_legacy_queue_space);
255
256
257/**
258 * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
259 */
260static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
261 int count, int slots_num, u32 id)
262{
263 q->n_bd = count;
264 q->n_window = slots_num;
265 q->id = id;
266
267 /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
268 * and iwl_legacy_queue_dec_wrap are broken. */
269 BUG_ON(!is_power_of_2(count));
270
271 /* slots_num must be power-of-two size, otherwise
272 * iwl_legacy_get_cmd_index is broken. */
273 BUG_ON(!is_power_of_2(slots_num));
274
275 q->low_mark = q->n_window / 4;
276 if (q->low_mark < 4)
277 q->low_mark = 4;
278
279 q->high_mark = q->n_window / 8;
280 if (q->high_mark < 2)
281 q->high_mark = 2;
282
283 q->write_ptr = q->read_ptr = 0;
284
285 return 0;
286}
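As a worked example of the watermark arithmetic above: with slots_num = 256 a queue gets low_mark = 256/4 = 64 and high_mark = 256/8 = 32, while a hypothetical slots_num of 8 would be clamped up to low_mark = 4 and high_mark = 2.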
287
288/**
289 * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
290 */
291static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
292 struct iwl_tx_queue *txq, u32 id)
293{
294 struct device *dev = &priv->pci_dev->dev;
295 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
296
297 /* Driver private data, only for Tx (not command) queues,
298 * not shared with device. */
299 if (id != priv->cmd_queue) {
300 txq->txb = kzalloc(sizeof(txq->txb[0]) *
301 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
302 if (!txq->txb) {
303			IWL_ERR(priv, "kzalloc for auxiliary BD "
304 "structures failed\n");
305 goto error;
306 }
307 } else {
308 txq->txb = NULL;
309 }
310
311 /* Circular buffer of transmit frame descriptors (TFDs),
312 * shared with device */
313 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
314 GFP_KERNEL);
315 if (!txq->tfds) {
316		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
317 goto error;
318 }
319 txq->q.id = id;
320
321 return 0;
322
323 error:
324 kfree(txq->txb);
325 txq->txb = NULL;
326
327 return -ENOMEM;
328}
329
330/**
331 * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
332 */
333int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
334 int slots_num, u32 txq_id)
335{
336 int i, len;
337 int ret;
338 int actual_slots = slots_num;
339
340 /*
341 * Alloc buffer array for commands (Tx or other types of commands).
342 * For the command queue (#4/#9), allocate command space + one big
343	 * command for scan, since the scan command is very large; the system
344	 * will not have two scans at the same time, so only one is needed.
345 * For normal Tx queues (all other queues), no super-size command
346 * space is needed.
347 */
348 if (txq_id == priv->cmd_queue)
349 actual_slots++;
350
351 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
352 GFP_KERNEL);
353 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
354 GFP_KERNEL);
355
356 if (!txq->meta || !txq->cmd)
357 goto out_free_arrays;
358
359 len = sizeof(struct iwl_device_cmd);
360 for (i = 0; i < actual_slots; i++) {
361 /* only happens for cmd queue */
362 if (i == slots_num)
363 len = IWL_MAX_CMD_SIZE;
364
365 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
366 if (!txq->cmd[i])
367 goto err;
368 }
369
370 /* Alloc driver data array and TFD circular buffer */
371 ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
372 if (ret)
373 goto err;
374
375 txq->need_update = 0;
376
377 /*
378 * For the default queues 0-3, set up the swq_id
379 * already -- all others need to get one later
380 * (if they need one at all).
381 */
382 if (txq_id < 4)
383 iwl_legacy_set_swq_id(txq, txq_id, txq_id);
384
385 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
386 * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
387 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
388
389 /* Initialize queue's high/low-water marks, and head/tail indexes */
390 iwl_legacy_queue_init(priv, &txq->q,
391 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
392
393 /* Tell device where to find queue */
394 priv->cfg->ops->lib->txq_init(priv, txq);
395
396 return 0;
397err:
398 for (i = 0; i < actual_slots; i++)
399 kfree(txq->cmd[i]);
400out_free_arrays:
401 kfree(txq->meta);
402 kfree(txq->cmd);
403
404 return -ENOMEM;
405}
406EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
407
408void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
409 int slots_num, u32 txq_id)
410{
411 int actual_slots = slots_num;
412
413 if (txq_id == priv->cmd_queue)
414 actual_slots++;
415
416 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
417
418 txq->need_update = 0;
419
420 /* Initialize queue's high/low-water marks, and head/tail indexes */
421 iwl_legacy_queue_init(priv, &txq->q,
422 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
423
424 /* Tell device where to find queue */
425 priv->cfg->ops->lib->txq_init(priv, txq);
426}
427EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
428
429/*************** HOST COMMAND QUEUE FUNCTIONS *****/
430
431/**
432 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
433 * @priv: device private data pointer
434 * @cmd: a pointer to the ucode command structure
435 *
436 * The function returns a value < 0 to indicate that the operation
437 * failed. On success, it returns the index (> 0) of the command in
438 * the command queue.
439 */
440int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
441{
442 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
443 struct iwl_queue *q = &txq->q;
444 struct iwl_device_cmd *out_cmd;
445 struct iwl_cmd_meta *out_meta;
446 dma_addr_t phys_addr;
447 unsigned long flags;
448 int len;
449 u32 idx;
450 u16 fix_size;
451
452 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
453 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
454
455	/* If any of the command structures ends up being larger than
456	 * TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
457	 * we will need to increase the size of the TFD entries.
458	 * Also check that the command buffer does not exceed the size
459	 * of device_cmd and max_cmd_size. */
460 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
461 !(cmd->flags & CMD_SIZE_HUGE));
462 BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
463
464 if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
465 IWL_WARN(priv, "Not sending command - %s KILL\n",
466 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
467 return -EIO;
468 }
469
470 if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
471 IWL_ERR(priv, "No space in command queue\n");
472 IWL_ERR(priv, "Restarting adapter due to queue full\n");
473 queue_work(priv->workqueue, &priv->restart);
474 return -ENOSPC;
475 }
476
477 spin_lock_irqsave(&priv->hcmd_lock, flags);
478
479 /* If this is a huge cmd, mark the huge flag also on the meta.flags
480 * of the _original_ cmd. This is used for DMA mapping clean up.
481 */
482 if (cmd->flags & CMD_SIZE_HUGE) {
483 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
484 txq->meta[idx].flags = CMD_SIZE_HUGE;
485 }
486
487 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
488 out_cmd = txq->cmd[idx];
489 out_meta = &txq->meta[idx];
490
491 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
492 out_meta->flags = cmd->flags;
493 if (cmd->flags & CMD_WANT_SKB)
494 out_meta->source = cmd;
495 if (cmd->flags & CMD_ASYNC)
496 out_meta->callback = cmd->callback;
497
498 out_cmd->hdr.cmd = cmd->id;
499 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
500
501 /* At this point, the out_cmd now has all of the incoming cmd
502 * information */
503
504 out_cmd->hdr.flags = 0;
505 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
506 INDEX_TO_SEQ(q->write_ptr));
507 if (cmd->flags & CMD_SIZE_HUGE)
508 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
509 len = sizeof(struct iwl_device_cmd);
510 if (idx == TFD_CMD_SLOTS)
511 len = IWL_MAX_CMD_SIZE;
512
513#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
514 switch (out_cmd->hdr.cmd) {
515 case REPLY_TX_LINK_QUALITY_CMD:
516 case SENSITIVITY_CMD:
517 IWL_DEBUG_HC_DUMP(priv,
518 "Sending command %s (#%x), seq: 0x%04X, "
519 "%d bytes at %d[%d]:%d\n",
520 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
521 out_cmd->hdr.cmd,
522 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
523 q->write_ptr, idx, priv->cmd_queue);
524 break;
525 default:
526 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
527 "%d bytes at %d[%d]:%d\n",
528 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
529 out_cmd->hdr.cmd,
530 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
531 q->write_ptr, idx, priv->cmd_queue);
532 }
533#endif
534 txq->need_update = 1;
535
536 if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
537 /* Set up entry in queue's byte count circular buffer */
538 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
539
540 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
541 fix_size, PCI_DMA_BIDIRECTIONAL);
542 dma_unmap_addr_set(out_meta, mapping, phys_addr);
543 dma_unmap_len_set(out_meta, len, fix_size);
544
545 trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
546 fix_size, cmd->flags);
547
548 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
549 phys_addr, fix_size, 1,
550 U32_PAD(cmd->len));
551
552 /* Increment and update queue's write index */
553 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
554 iwl_legacy_txq_update_write_ptr(priv, txq);
555
556 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
557 return idx;
558}
559
560/**
561 * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
562 *
563 * When the FW advances the 'R' index, all entries between the old and new
564 * 'R' index need to be reclaimed. As a result, some free space forms. If
565 * there is enough free space (> low mark), wake the stack that feeds us.
566 */
567static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
568 int idx, int cmd_idx)
569{
570 struct iwl_tx_queue *txq = &priv->txq[txq_id];
571 struct iwl_queue *q = &txq->q;
572 int nfreed = 0;
573
574 if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
575 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
576 "is out of range [0-%d] %d %d.\n", txq_id,
577 idx, q->n_bd, q->write_ptr, q->read_ptr);
578 return;
579 }
580
581 for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
582 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
583
584 if (nfreed++ > 0) {
585 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
586 q->write_ptr, q->read_ptr);
587 queue_work(priv->workqueue, &priv->restart);
588 }
589
590 }
591}
592
593/**
594 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
595 * @rxb: Rx buffer to reclaim
596 *
597 * If an Rx buffer has an async callback associated with it, the callback
598 * will be executed. The attached skb (if present) will only be freed
599 * if the callback returns 1.
600 */
601void
602iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
603{
604 struct iwl_rx_packet *pkt = rxb_addr(rxb);
605 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
606 int txq_id = SEQ_TO_QUEUE(sequence);
607 int index = SEQ_TO_INDEX(sequence);
608 int cmd_index;
609 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
610 struct iwl_device_cmd *cmd;
611 struct iwl_cmd_meta *meta;
612 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
613
614 /* If a Tx command is being handled and it isn't in the actual
615	 * command queue, then a command routing bug has been introduced
616 * in the queue management code. */
617 if (WARN(txq_id != priv->cmd_queue,
618 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
619 txq_id, priv->cmd_queue, sequence,
620 priv->txq[priv->cmd_queue].q.read_ptr,
621 priv->txq[priv->cmd_queue].q.write_ptr)) {
622 iwl_print_hex_error(priv, pkt, 32);
623 return;
624 }
625
626 /* If this is a huge cmd, clear the huge flag on the meta.flags
627	 * of the _original_ cmd, so that iwl_legacy_cmd_queue_free won't unmap
628 * the DMA buffer for the scan (huge) command.
629 */
630 if (huge) {
631 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
632 txq->meta[cmd_index].flags = 0;
633 }
634 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
635 cmd = txq->cmd[cmd_index];
636 meta = &txq->meta[cmd_index];
637
638 pci_unmap_single(priv->pci_dev,
639 dma_unmap_addr(meta, mapping),
640 dma_unmap_len(meta, len),
641 PCI_DMA_BIDIRECTIONAL);
642
643 /* Input error checking is done when commands are added to queue. */
644 if (meta->flags & CMD_WANT_SKB) {
645 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
646 rxb->page = NULL;
647 } else if (meta->callback)
648 meta->callback(priv, cmd, pkt);
649
650 iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
651
652 if (!(meta->flags & CMD_ASYNC)) {
653 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
654 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
655 iwl_legacy_get_cmd_string(cmd->hdr.cmd));
656 wake_up_interruptible(&priv->wait_command_queue);
657 }
658 meta->flags = 0;
659}
660EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
new file mode 100644
index 000000000000..ab87e1b73529
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -0,0 +1,4293 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/wireless.h>
44#include <linux/firmware.h>
45#include <linux/etherdevice.h>
46#include <linux/if_arp.h>
47
48#include <net/ieee80211_radiotap.h>
49#include <net/mac80211.h>
50
51#include <asm/div64.h>
52
53#define DRV_NAME "iwl3945"
54
55#include "iwl-fh.h"
56#include "iwl-3945-fh.h"
57#include "iwl-commands.h"
58#include "iwl-sta.h"
59#include "iwl-3945.h"
60#include "iwl-core.h"
61#include "iwl-helpers.h"
62#include "iwl-dev.h"
63#include "iwl-spectrum.h"
64
65/*
66 * module name, copyright, version, etc.
67 */
68
69#define DRV_DESCRIPTION \
70"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
71
72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
73#define VD "d"
74#else
75#define VD
76#endif
77
78/*
79 * add "s" to indicate spectrum measurement included.
80 * we add it here to be consistent with previous releases in which
81 * this was configurable.
82 */
83#define DRV_VERSION IWLWIFI_VERSION VD "s"
84#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
85#define DRV_AUTHOR "<ilw@linux.intel.com>"
86
87MODULE_DESCRIPTION(DRV_DESCRIPTION);
88MODULE_VERSION(DRV_VERSION);
89MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
90MODULE_LICENSE("GPL");
91
92 /* module parameters */
93struct iwl_mod_params iwl3945_mod_params = {
94 .sw_crypto = 1,
95 .restart_fw = 1,
96 /* the rest are 0 by default */
97};
98
99/**
100 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
101 * @priv: eeprom and antenna fields are used to determine antenna flags
102 *
103 * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
104 * iwl3945_mod_params.antenna specifies the antenna diversity mode:
105 *
106 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
107 * IWL_ANTENNA_MAIN - Force MAIN antenna
108 * IWL_ANTENNA_AUX - Force AUX antenna
109 */
110__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
111{
112 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
113
114 switch (iwl3945_mod_params.antenna) {
115 case IWL_ANTENNA_DIVERSITY:
116 return 0;
117
118 case IWL_ANTENNA_MAIN:
119 if (eeprom->antenna_switch_type)
120 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
121 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
122
123 case IWL_ANTENNA_AUX:
124 if (eeprom->antenna_switch_type)
125 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
126 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
127 }
128
129 /* bad antenna selector value */
130 IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
131 iwl3945_mod_params.antenna);
132
133 return 0; /* "diversity" is default if error */
134}
135
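A one-line sketch of how a caller might fold the returned flags into a staging RXON; the ctx variable and its staging RXON at this call site are assumed here, not shown in this diff:

	/* Hypothetical: apply the antenna selection to the staging RXON */
	ctx->staging.flags |= iwl3945_get_antenna_flags(priv);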
136static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
137 struct ieee80211_key_conf *keyconf,
138 u8 sta_id)
139{
140 unsigned long flags;
141 __le16 key_flags = 0;
142 int ret;
143
144 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
145 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
146
147 if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
148 key_flags |= STA_KEY_MULTICAST_MSK;
149
150 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
151 keyconf->hw_key_idx = keyconf->keyidx;
152 key_flags &= ~STA_KEY_FLG_INVALID;
153
154 spin_lock_irqsave(&priv->sta_lock, flags);
155 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
156 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
157 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
158 keyconf->keylen);
159
160 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
161 keyconf->keylen);
162
163 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
164 == STA_KEY_FLG_NO_ENC)
165 priv->stations[sta_id].sta.key.key_offset =
166 iwl_legacy_get_free_ucode_key_index(priv);
167	/* else, we are overriding an existing key => no need to allocate room
168 * in uCode. */
169
170 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
171 "no space for a new key");
172
173 priv->stations[sta_id].sta.key.key_flags = key_flags;
174 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
175 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
176
177 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
178
179 ret = iwl_legacy_send_add_sta(priv,
180 &priv->stations[sta_id].sta, CMD_ASYNC);
181
182 spin_unlock_irqrestore(&priv->sta_lock, flags);
183
184 return ret;
185}
186
187static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
188 struct ieee80211_key_conf *keyconf,
189 u8 sta_id)
190{
191 return -EOPNOTSUPP;
192}
193
194static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
195 struct ieee80211_key_conf *keyconf,
196 u8 sta_id)
197{
198 return -EOPNOTSUPP;
199}
200
201static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
202{
203 unsigned long flags;
204 struct iwl_legacy_addsta_cmd sta_cmd;
205
206 spin_lock_irqsave(&priv->sta_lock, flags);
207 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
208 memset(&priv->stations[sta_id].sta.key, 0,
209 sizeof(struct iwl4965_keyinfo));
210 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
211 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
212 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
213 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
214 spin_unlock_irqrestore(&priv->sta_lock, flags);
215
216 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
217 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
218}
219
220static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
221 struct ieee80211_key_conf *keyconf, u8 sta_id)
222{
223 int ret = 0;
224
225 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
226
227 switch (keyconf->cipher) {
228 case WLAN_CIPHER_SUITE_CCMP:
229 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
230 break;
231 case WLAN_CIPHER_SUITE_TKIP:
232 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
233 break;
234 case WLAN_CIPHER_SUITE_WEP40:
235 case WLAN_CIPHER_SUITE_WEP104:
236 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
237 break;
238 default:
239 IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
240 keyconf->cipher);
241 ret = -EINVAL;
242 }
243
244 IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
245 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
246 sta_id, ret);
247
248 return ret;
249}
250
251static int iwl3945_remove_static_key(struct iwl_priv *priv)
252{
253 int ret = -EOPNOTSUPP;
254
255 return ret;
256}
257
258static int iwl3945_set_static_key(struct iwl_priv *priv,
259 struct ieee80211_key_conf *key)
260{
261 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
262 key->cipher == WLAN_CIPHER_SUITE_WEP104)
263 return -EOPNOTSUPP;
264
265 IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
266 return -EINVAL;
267}
268
269static void iwl3945_clear_free_frames(struct iwl_priv *priv)
270{
271 struct list_head *element;
272
273 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
274 priv->frames_count);
275
276 while (!list_empty(&priv->free_frames)) {
277 element = priv->free_frames.next;
278 list_del(element);
279 kfree(list_entry(element, struct iwl3945_frame, list));
280 priv->frames_count--;
281 }
282
283 if (priv->frames_count) {
284 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
285 priv->frames_count);
286 priv->frames_count = 0;
287 }
288}
289
290static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
291{
292 struct iwl3945_frame *frame;
293 struct list_head *element;
294 if (list_empty(&priv->free_frames)) {
295 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
296 if (!frame) {
297 IWL_ERR(priv, "Could not allocate frame!\n");
298 return NULL;
299 }
300
301 priv->frames_count++;
302 return frame;
303 }
304
305 element = priv->free_frames.next;
306 list_del(element);
307 return list_entry(element, struct iwl3945_frame, list);
308}
309
310static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
311{
312 memset(frame, 0, sizeof(*frame));
313 list_add(&frame->list, &priv->free_frames);
314}
315
316unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
317 struct ieee80211_hdr *hdr,
318 int left)
319{
320
321 if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
322 return 0;
323
324 if (priv->beacon_skb->len > left)
325 return 0;
326
327 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
328
329 return priv->beacon_skb->len;
330}
331
332static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
333{
334 struct iwl3945_frame *frame;
335 unsigned int frame_size;
336 int rc;
337 u8 rate;
338
339 frame = iwl3945_get_free_frame(priv);
340
341 if (!frame) {
342 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
343 "command.\n");
344 return -ENOMEM;
345 }
346
347 rate = iwl_legacy_get_lowest_plcp(priv,
348 &priv->contexts[IWL_RXON_CTX_BSS]);
349
350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
351
352 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
353 &frame->u.cmd[0]);
354
355 iwl3945_free_frame(priv, frame);
356
357 return rc;
358}
359
360static void iwl3945_unset_hw_params(struct iwl_priv *priv)
361{
362 if (priv->_3945.shared_virt)
363 dma_free_coherent(&priv->pci_dev->dev,
364 sizeof(struct iwl3945_shared),
365 priv->_3945.shared_virt,
366 priv->_3945.shared_phys);
367}
368
369static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
370 struct ieee80211_tx_info *info,
371 struct iwl_device_cmd *cmd,
372 struct sk_buff *skb_frag,
373 int sta_id)
374{
375 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
376 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
377
378 tx_cmd->sec_ctl = 0;
379
380 switch (keyinfo->cipher) {
381 case WLAN_CIPHER_SUITE_CCMP:
382 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
383 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
384 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
385 break;
386
387 case WLAN_CIPHER_SUITE_TKIP:
388 break;
389
390 case WLAN_CIPHER_SUITE_WEP104:
391 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
392 /* fall through */
393 case WLAN_CIPHER_SUITE_WEP40:
394 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
395 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
396
397 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
398
399 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
400 "with key %d\n", info->control.hw_key->hw_key_idx);
401 break;
402
403 default:
404 IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
405 break;
406 }
407}
408
409/*
410 * handle build REPLY_TX command notification.
411 */
412static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
413 struct iwl_device_cmd *cmd,
414 struct ieee80211_tx_info *info,
415 struct ieee80211_hdr *hdr, u8 std_id)
416{
417 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
418 __le32 tx_flags = tx_cmd->tx_flags;
419 __le16 fc = hdr->frame_control;
420
421 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
422 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
423 tx_flags |= TX_CMD_FLG_ACK_MSK;
424 if (ieee80211_is_mgmt(fc))
425 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
426 if (ieee80211_is_probe_resp(fc) &&
427 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
428 tx_flags |= TX_CMD_FLG_TSF_MSK;
429 } else {
430 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
431 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
432 }
433
434 tx_cmd->sta_id = std_id;
435 if (ieee80211_has_morefrags(fc))
436 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
437
438 if (ieee80211_is_data_qos(fc)) {
439 u8 *qc = ieee80211_get_qos_ctl(hdr);
440 tx_cmd->tid_tspec = qc[0] & 0xf;
441 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
442 } else {
443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
444 }
445
446 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
447
448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
449 if (ieee80211_is_mgmt(fc)) {
450 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
451 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
452 else
453 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
454 } else {
455 tx_cmd->timeout.pm_frame_timeout = 0;
456 }
457
458 tx_cmd->driver_txop = 0;
459 tx_cmd->tx_flags = tx_flags;
460 tx_cmd->next_frame_len = 0;
461}
462
463/*
464 * start REPLY_TX command process
465 */
466static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
467{
468 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
469 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
470 struct iwl3945_tx_cmd *tx_cmd;
471 struct iwl_tx_queue *txq = NULL;
472 struct iwl_queue *q = NULL;
473 struct iwl_device_cmd *out_cmd;
474 struct iwl_cmd_meta *out_meta;
475 dma_addr_t phys_addr;
476 dma_addr_t txcmd_phys;
477 int txq_id = skb_get_queue_mapping(skb);
478 u16 len, idx, hdr_len;
479 u8 id;
480 u8 unicast;
481 u8 sta_id;
482 u8 tid = 0;
483 __le16 fc;
484 u8 wait_write_ptr = 0;
485 unsigned long flags;
486
487 spin_lock_irqsave(&priv->lock, flags);
488 if (iwl_legacy_is_rfkill(priv)) {
489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
490 goto drop_unlock;
491 }
492
493 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
494 IWL_ERR(priv, "ERROR: No TX rate available.\n");
495 goto drop_unlock;
496 }
497
498 unicast = !is_multicast_ether_addr(hdr->addr1);
499 id = 0;
500
501 fc = hdr->frame_control;
502
503#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
504 if (ieee80211_is_auth(fc))
505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
506 else if (ieee80211_is_assoc_req(fc))
507 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
508 else if (ieee80211_is_reassoc_req(fc))
509 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
510#endif
511
512 spin_unlock_irqrestore(&priv->lock, flags);
513
514 hdr_len = ieee80211_hdrlen(fc);
515
516 /* Find index into station table for destination station */
517 sta_id = iwl_legacy_sta_id_or_broadcast(
518 priv, &priv->contexts[IWL_RXON_CTX_BSS],
519 info->control.sta);
520 if (sta_id == IWL_INVALID_STATION) {
521 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
522 hdr->addr1);
523 goto drop;
524 }
525
526 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
527
528 if (ieee80211_is_data_qos(fc)) {
529 u8 *qc = ieee80211_get_qos_ctl(hdr);
530 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
531 if (unlikely(tid >= MAX_TID_COUNT))
532 goto drop;
533 }
534
535 /* Descriptor for chosen Tx queue */
536 txq = &priv->txq[txq_id];
537 q = &txq->q;
538
539 if ((iwl_legacy_queue_space(q) < q->high_mark))
540 goto drop;
541
542 spin_lock_irqsave(&priv->lock, flags);
543
544 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
545
546 /* Set up driver data for this TFD */
547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
548 txq->txb[q->write_ptr].skb = skb;
549 txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
550
551 /* Init first empty entry in queue's array of Tx/cmd buffers */
552 out_cmd = txq->cmd[idx];
553 out_meta = &txq->meta[idx];
554 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
555 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
556 memset(tx_cmd, 0, sizeof(*tx_cmd));
557
558 /*
559 * Set up the Tx-command (not MAC!) header.
560 * Store the chosen Tx queue and TFD index within the sequence field;
561 * after Tx, uCode's Tx response will return this value so driver can
562 * locate the frame within the tx queue and do post-tx processing.
563 */
564 out_cmd->hdr.cmd = REPLY_TX;
565 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
566 INDEX_TO_SEQ(q->write_ptr)));
567
568 /* Copy MAC header from skb into command buffer */
569 memcpy(tx_cmd->hdr, hdr, hdr_len);
570
571
572 if (info->control.hw_key)
573 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
574
575 /* TODO need this for burst mode later on */
576 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
577
578 /* set is_hcca to 0; it probably will never be implemented */
579 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
580
581 /* Total # bytes to be transmitted */
582 len = (u16)skb->len;
583 tx_cmd->len = cpu_to_le16(len);
584
585 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
586 iwl_legacy_update_stats(priv, true, fc, len);
587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
589
590 if (!ieee80211_has_morefrags(hdr->frame_control)) {
591 txq->need_update = 1;
592 } else {
593 wait_write_ptr = 1;
594 txq->need_update = 0;
595 }
596
597 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
598 le16_to_cpu(out_cmd->hdr.sequence));
599 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
600 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
601 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
602 ieee80211_hdrlen(fc));
603
604 /*
605 * Use the first empty entry in this queue's command buffer array
606 * to contain the Tx command and MAC header concatenated together
607 * (payload data will be in another buffer).
608 * Size of this varies, due to varying MAC header length.
609 * If end is not dword aligned, we'll have 2 extra bytes at the end
610 * of the MAC header (device reads on dword boundaries).
611 * We'll tell device about this padding later.
612 */
613 len = sizeof(struct iwl3945_tx_cmd) +
614 sizeof(struct iwl_cmd_header) + hdr_len;
615 len = (len + 3) & ~3;
616
617 /* Physical address of this Tx command's header (not MAC header!),
618 * within command buffer array. */
619 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
620 len, PCI_DMA_TODEVICE);
 621 /* We do not map the metadata ... so we can safely access the address to
 622 * provide to the unmap command */
623 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
624 dma_unmap_len_set(out_meta, len, len);
625
626 /* Add buffer containing Tx command and MAC(!) header to TFD's
627 * first entry */
628 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
629 txcmd_phys, len, 1, 0);
630
631
632 /* Set up TFD's 2nd entry to point directly to remainder of skb,
633 * if any (802.11 null frames have no payload). */
634 len = skb->len - hdr_len;
635 if (len) {
636 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
637 len, PCI_DMA_TODEVICE);
638 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
639 phys_addr, len,
640 0, U32_PAD(len));
641 }
642
643
644 /* Tell device the write index *just past* this latest filled TFD */
645 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
646 iwl_legacy_txq_update_write_ptr(priv, txq);
647 spin_unlock_irqrestore(&priv->lock, flags);
648
649 if ((iwl_legacy_queue_space(q) < q->high_mark)
650 && priv->mac80211_registered) {
651 if (wait_write_ptr) {
652 spin_lock_irqsave(&priv->lock, flags);
653 txq->need_update = 1;
654 iwl_legacy_txq_update_write_ptr(priv, txq);
655 spin_unlock_irqrestore(&priv->lock, flags);
656 }
657
658 iwl_legacy_stop_queue(priv, txq);
659 }
660
661 return 0;
662
663drop_unlock:
664 spin_unlock_irqrestore(&priv->lock, flags);
665drop:
666 return -1;
667}
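/*
 * [Editor's note -- illustrative stand-alone sketch, not part of the original
 * driver.]  Two pieces of alignment arithmetic appear in iwl3945_tx_skb()
 * above: the Tx command + MAC header length is rounded up to a dword
 * boundary with "(len + 3) & ~3", and the payload fragment carries a pad
 * count so the device can skip the unused tail bytes.  The helpers below
 * show the same arithmetic in isolation; sketch_u32_pad() is assumed to
 * behave like the driver's U32_PAD() helper, and the names are the editor's.
 */
#include <stdint.h>

/* Round a byte count up to the next multiple of 4 (dword alignment),
 * exactly as "(len + 3) & ~3" does above: 26 -> 28, 24 -> 24. */
uint32_t sketch_align4(uint32_t len)
{
	return (len + 3) & ~3u;
}

/* Pad bytes needed to reach the next dword boundary: 26 -> 2, 24 -> 0. */
uint32_t sketch_u32_pad(uint32_t len)
{
	return (4 - len) & 0x3;
}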
668
669static int iwl3945_get_measurement(struct iwl_priv *priv,
670 struct ieee80211_measurement_params *params,
671 u8 type)
672{
673 struct iwl_spectrum_cmd spectrum;
674 struct iwl_rx_packet *pkt;
675 struct iwl_host_cmd cmd = {
676 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
677 .data = (void *)&spectrum,
678 .flags = CMD_WANT_SKB,
679 };
680 u32 add_time = le64_to_cpu(params->start_time);
681 int rc;
682 int spectrum_resp_status;
683 int duration = le16_to_cpu(params->duration);
684 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
685
686 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
687 add_time = iwl_legacy_usecs_to_beacons(priv,
688 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
689 le16_to_cpu(ctx->timing.beacon_interval));
690
691 memset(&spectrum, 0, sizeof(spectrum));
692
693 spectrum.channel_count = cpu_to_le16(1);
694 spectrum.flags =
695 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
696 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
697 cmd.len = sizeof(spectrum);
698 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
699
700 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
701 spectrum.start_time =
702 iwl_legacy_add_beacon_time(priv,
703 priv->_3945.last_beacon_time, add_time,
704 le16_to_cpu(ctx->timing.beacon_interval));
705 else
706 spectrum.start_time = 0;
707
708 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
709 spectrum.channels[0].channel = params->channel;
710 spectrum.channels[0].type = type;
711 if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
712 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
713 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
714
715 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
716 if (rc)
717 return rc;
718
719 pkt = (struct iwl_rx_packet *)cmd.reply_page;
720 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 721 IWL_ERR(priv, "Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n");
722 rc = -EIO;
723 }
724
725 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
726 switch (spectrum_resp_status) {
727 case 0: /* Command will be handled */
728 if (pkt->u.spectrum.id != 0xff) {
729 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
730 pkt->u.spectrum.id);
731 priv->measurement_status &= ~MEASUREMENT_READY;
732 }
733 priv->measurement_status |= MEASUREMENT_ACTIVE;
734 rc = 0;
735 break;
736
737 case 1: /* Command will not be handled */
738 rc = -EAGAIN;
739 break;
740 }
741
742 iwl_legacy_free_pages(priv, cmd.reply_page);
743
744 return rc;
745}
746
747static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
748 struct iwl_rx_mem_buffer *rxb)
749{
750 struct iwl_rx_packet *pkt = rxb_addr(rxb);
751 struct iwl_alive_resp *palive;
752 struct delayed_work *pwork;
753
754 palive = &pkt->u.alive_frame;
755
756 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
757 "0x%01X 0x%01X\n",
758 palive->is_valid, palive->ver_type,
759 palive->ver_subtype);
760
761 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
762 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
763 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
764 sizeof(struct iwl_alive_resp));
765 pwork = &priv->init_alive_start;
766 } else {
767 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
768 memcpy(&priv->card_alive, &pkt->u.alive_frame,
769 sizeof(struct iwl_alive_resp));
770 pwork = &priv->alive_start;
771 iwl3945_disable_events(priv);
772 }
773
774 /* We delay the ALIVE response by 5ms to
775 * give the HW RF Kill time to activate... */
776 if (palive->is_valid == UCODE_VALID_OK)
777 queue_delayed_work(priv->workqueue, pwork,
778 msecs_to_jiffies(5));
779 else
780 IWL_WARN(priv, "uCode did not respond OK.\n");
781}
782
783static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
784 struct iwl_rx_mem_buffer *rxb)
785{
786#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
787 struct iwl_rx_packet *pkt = rxb_addr(rxb);
788#endif
789
790 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
791}
792
793static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
794 struct iwl_rx_mem_buffer *rxb)
795{
796 struct iwl_rx_packet *pkt = rxb_addr(rxb);
797 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
798#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
799 u8 rate = beacon->beacon_notify_hdr.rate;
800
801 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
802 "tsf %d %d rate %d\n",
803 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
804 beacon->beacon_notify_hdr.failure_frame,
805 le32_to_cpu(beacon->ibss_mgr_status),
806 le32_to_cpu(beacon->high_tsf),
807 le32_to_cpu(beacon->low_tsf), rate);
808#endif
809
810 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
811
812}
813
814/* Handle notification from uCode that card's power state is changing
815 * due to software, hardware, or critical temperature RFKILL */
816static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
817 struct iwl_rx_mem_buffer *rxb)
818{
819 struct iwl_rx_packet *pkt = rxb_addr(rxb);
820 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
821 unsigned long status = priv->status;
822
823 IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
824 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
825 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
826
827 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
828 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
829
830 if (flags & HW_CARD_DISABLED)
831 set_bit(STATUS_RF_KILL_HW, &priv->status);
832 else
833 clear_bit(STATUS_RF_KILL_HW, &priv->status);
834
835
836 iwl_legacy_scan_cancel(priv);
837
838 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
839 test_bit(STATUS_RF_KILL_HW, &priv->status)))
840 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
841 test_bit(STATUS_RF_KILL_HW, &priv->status));
842 else
843 wake_up_interruptible(&priv->wait_command_queue);
844}
845
846/**
847 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
848 *
849 * Setup the RX handlers for each of the reply types sent from the uCode
850 * to the host.
851 *
852 * This function chains into the hardware specific files for them to setup
853 * any hardware specific handlers as well.
854 */
855static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
856{
857 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
858 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
859 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
860 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
861 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
862 iwl_legacy_rx_spectrum_measure_notif;
863 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
864 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
865 iwl_legacy_rx_pm_debug_statistics_notif;
866 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
867
868 /*
869 * The same handler is used for both the REPLY to a discrete
870 * statistics request from the host as well as for the periodic
871 * statistics notifications (after received beacons) from the uCode.
872 */
873 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
874 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
875
876 iwl_legacy_setup_rx_scan_handlers(priv);
877 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
878
879 /* Set up hardware specific Rx handlers */
880 iwl3945_hw_rx_handler_setup(priv);
881}
882
883/************************** RX-FUNCTIONS ****************************/
884/*
885 * Rx theory of operation
886 *
887 * The host allocates 32 DMA target addresses and passes the host address
888 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
889 * 0 to 31
890 *
891 * Rx Queue Indexes
892 * The host/firmware share two index registers for managing the Rx buffers.
893 *
894 * The READ index maps to the first position that the firmware may be writing
895 * to -- the driver can read up to (but not including) this position and get
896 * good data.
897 * The READ index is managed by the firmware once the card is enabled.
898 *
899 * The WRITE index maps to the last position the driver has read from -- the
900 * position preceding WRITE is the last slot the firmware can place a packet.
901 *
902 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
903 * WRITE = READ.
904 *
905 * During initialization, the host sets up the READ queue position to the first
906 * INDEX position, and WRITE to the last (READ - 1 wrapped)
907 *
908 * When the firmware places a packet in a buffer, it will advance the READ index
909 * and fire the RX interrupt. The driver can then query the READ index and
910 * process as many packets as possible, moving the WRITE index forward as it
911 * resets the Rx queue buffers with new memory.
912 *
913 * The management in the driver is as follows:
914 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
915 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
916 * to replenish the iwl->rxq->rx_free.
917 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
918 * iwl->rxq is replenished and the READ INDEX is updated (updating the
919 * 'processed' and 'read' driver indexes as well)
920 * + A received packet is processed and handed to the kernel network stack,
921 * detached from the iwl->rxq. The driver 'processed' index is updated.
922 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
923 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
924 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
925 * were enough free buffers and RX_STALLED is set it is cleared.
926 *
927 *
928 * Driver sequence:
929 *
930 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
931 * iwl3945_rx_queue_restock
932 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
933 * queue, updates firmware pointers, and updates
934 * the WRITE index. If insufficient rx_free buffers
935 * are available, schedules iwl3945_rx_replenish
936 *
937 * -- enable interrupts --
938 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
939 * READ INDEX, detaching the SKB from the pool.
940 * Moves the packet buffer from queue to rx_used.
941 * Calls iwl3945_rx_queue_restock to refill any empty
942 * slots.
943 * ...
944 *
945 */
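/*
 * [Editor's note -- illustrative stand-alone sketch, not part of the original
 * driver.]  The "Rx theory of operation" comment above describes a classic
 * circular buffer shared between driver and firmware: READ/WRITE indexes
 * that wrap at the queue size, "empty" when WRITE == READ - 1 and "full"
 * when WRITE == READ.  The sketch below models just that index arithmetic;
 * the queue size and names are illustrative, not the driver's.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_RX_QUEUE_SIZE 256u               /* power of two, like the driver's queue */
#define SKETCH_RX_QUEUE_MASK (SKETCH_RX_QUEUE_SIZE - 1)

struct sketch_rx_queue {
	uint32_t read;    /* last slot the driver has processed */
	uint32_t write;   /* last slot the driver has restocked */
};

/* Empty (no good data for the driver) when WRITE is one behind READ. */
bool sketch_rxq_empty(const struct sketch_rx_queue *q)
{
	return ((q->write + 1) & SKETCH_RX_QUEUE_MASK) == q->read;
}

/* Full (no room left for the firmware) when WRITE has caught up with READ. */
bool sketch_rxq_full(const struct sketch_rx_queue *q)
{
	return q->write == q->read;
}

/* Advance an index with wrap-around, as the restock/handle loops do. */
uint32_t sketch_rxq_inc(uint32_t index)
{
	return (index + 1) & SKETCH_RX_QUEUE_MASK;
}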
946
947/**
948 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
949 */
950static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
951 dma_addr_t dma_addr)
952{
953 return cpu_to_le32((u32)dma_addr);
954}
955
956/**
957 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
958 *
959 * If there are slots in the RX queue that need to be restocked,
960 * and we have free pre-allocated buffers, fill the ranks as much
961 * as we can, pulling from rx_free.
962 *
963 * This moves the 'write' index forward to catch up with 'processed', and
964 * also updates the memory address in the firmware to reference the new
965 * target buffer.
966 */
967static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
968{
969 struct iwl_rx_queue *rxq = &priv->rxq;
970 struct list_head *element;
971 struct iwl_rx_mem_buffer *rxb;
972 unsigned long flags;
973 int write;
974
975 spin_lock_irqsave(&rxq->lock, flags);
976 write = rxq->write & ~0x7;
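	/* [Editor's note] The masked value computed above is not read again in
	 * this function; the "multiples of 8" alignment check after the loop
	 * uses rxq->write and rxq->write_actual directly. */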
977 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
978 /* Get next free Rx buffer, remove from free list */
979 element = rxq->rx_free.next;
980 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
981 list_del(element);
982
983 /* Point to Rx buffer via next RBD in circular buffer */
984 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
985 rxq->queue[rxq->write] = rxb;
986 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
987 rxq->free_count--;
988 }
989 spin_unlock_irqrestore(&rxq->lock, flags);
 990 /* If the pre-allocated buffer pool is dropping low, schedule work to
991 * refill it */
992 if (rxq->free_count <= RX_LOW_WATERMARK)
993 queue_work(priv->workqueue, &priv->rx_replenish);
994
995
996 /* If we've added more space for the firmware to place data, tell it.
997 * Increment device's write pointer in multiples of 8. */
998 if ((rxq->write_actual != (rxq->write & ~0x7))
999 || (abs(rxq->write - rxq->read) > 7)) {
1000 spin_lock_irqsave(&rxq->lock, flags);
1001 rxq->need_update = 1;
1002 spin_unlock_irqrestore(&rxq->lock, flags);
1003 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1004 }
1005}
1006
1007/**
 1008 * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
1009 *
1010 * When moving to rx_free an SKB is allocated for the slot.
1011 *
1012 * Also restock the Rx queue via iwl3945_rx_queue_restock.
 1013 * This is called as a scheduled work item (except during initialization).
1014 */
1015static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1016{
1017 struct iwl_rx_queue *rxq = &priv->rxq;
1018 struct list_head *element;
1019 struct iwl_rx_mem_buffer *rxb;
1020 struct page *page;
1021 unsigned long flags;
1022 gfp_t gfp_mask = priority;
1023
1024 while (1) {
1025 spin_lock_irqsave(&rxq->lock, flags);
1026
1027 if (list_empty(&rxq->rx_used)) {
1028 spin_unlock_irqrestore(&rxq->lock, flags);
1029 return;
1030 }
1031 spin_unlock_irqrestore(&rxq->lock, flags);
1032
1033 if (rxq->free_count > RX_LOW_WATERMARK)
1034 gfp_mask |= __GFP_NOWARN;
1035
1036 if (priv->hw_params.rx_page_order > 0)
1037 gfp_mask |= __GFP_COMP;
1038
1039 /* Alloc a new receive buffer */
1040 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1041 if (!page) {
1042 if (net_ratelimit())
1043 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1044 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
1045 net_ratelimit())
1046 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
1047 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
1048 rxq->free_count);
1049 /* We don't reschedule replenish work here -- we will
1050 * call the restock method and if it still needs
1051 * more buffers it will schedule replenish */
1052 break;
1053 }
1054
1055 spin_lock_irqsave(&rxq->lock, flags);
1056 if (list_empty(&rxq->rx_used)) {
1057 spin_unlock_irqrestore(&rxq->lock, flags);
1058 __free_pages(page, priv->hw_params.rx_page_order);
1059 return;
1060 }
1061 element = rxq->rx_used.next;
1062 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
1063 list_del(element);
1064 spin_unlock_irqrestore(&rxq->lock, flags);
1065
1066 rxb->page = page;
1067 /* Get physical address of RB/SKB */
1068 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1069 PAGE_SIZE << priv->hw_params.rx_page_order,
1070 PCI_DMA_FROMDEVICE);
1071
1072 spin_lock_irqsave(&rxq->lock, flags);
1073
1074 list_add_tail(&rxb->list, &rxq->rx_free);
1075 rxq->free_count++;
1076 priv->alloc_rxb_page++;
1077
1078 spin_unlock_irqrestore(&rxq->lock, flags);
1079 }
1080}
1081
1082void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1083{
1084 unsigned long flags;
1085 int i;
1086 spin_lock_irqsave(&rxq->lock, flags);
1087 INIT_LIST_HEAD(&rxq->rx_free);
1088 INIT_LIST_HEAD(&rxq->rx_used);
1089 /* Fill the rx_used queue with _all_ of the Rx buffers */
1090 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1091 /* In the reset function, these buffers may have been allocated
1092 * to an SKB, so we need to unmap and free potential storage */
1093 if (rxq->pool[i].page != NULL) {
1094 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1095 PAGE_SIZE << priv->hw_params.rx_page_order,
1096 PCI_DMA_FROMDEVICE);
1097 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1098 rxq->pool[i].page = NULL;
1099 }
1100 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1101 }
1102
 1103 /* Set ourselves up so that we have processed and used all buffers, but have
1104 * not restocked the Rx queue with fresh buffers */
1105 rxq->read = rxq->write = 0;
1106 rxq->write_actual = 0;
1107 rxq->free_count = 0;
1108 spin_unlock_irqrestore(&rxq->lock, flags);
1109}
1110
1111void iwl3945_rx_replenish(void *data)
1112{
1113 struct iwl_priv *priv = data;
1114 unsigned long flags;
1115
1116 iwl3945_rx_allocate(priv, GFP_KERNEL);
1117
1118 spin_lock_irqsave(&priv->lock, flags);
1119 iwl3945_rx_queue_restock(priv);
1120 spin_unlock_irqrestore(&priv->lock, flags);
1121}
1122
1123static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
1124{
1125 iwl3945_rx_allocate(priv, GFP_ATOMIC);
1126
1127 iwl3945_rx_queue_restock(priv);
1128}
1129
1130
1131/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 1132 * If an SKB has been detached, the pool entry needs to have its SKB set to NULL.
 1133 * This free routine walks the list of pool entries and, if the SKB is
 1134 * non-NULL, unmaps and frees it.
1135 */
1136static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1137{
1138 int i;
1139 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1140 if (rxq->pool[i].page != NULL) {
1141 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1142 PAGE_SIZE << priv->hw_params.rx_page_order,
1143 PCI_DMA_FROMDEVICE);
1144 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1145 rxq->pool[i].page = NULL;
1146 }
1147 }
1148
1149 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1150 rxq->bd_dma);
1151 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1152 rxq->rb_stts, rxq->rb_stts_dma);
1153 rxq->bd = NULL;
1154 rxq->rb_stts = NULL;
1155}
1156
1157
1158/* Convert linear signal-to-noise ratio into dB */
1159static u8 ratio2dB[100] = {
1160/* 0 1 2 3 4 5 6 7 8 9 */
1161 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
1162 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
1163 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
1164 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
1165 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
1166 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
1167 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
1168 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
1169 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
1170 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
1171};
1172
1173/* Calculates a relative dB value from a ratio of linear
1174 * (i.e. not dB) signal levels.
1175 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
1176int iwl3945_calc_db_from_ratio(int sig_ratio)
1177{
1178 /* 1000:1 or higher just report as 60 dB */
1179 if (sig_ratio >= 1000)
1180 return 60;
1181
1182 /* 100:1 or higher, divide by 10 and use table,
1183 * add 20 dB to make up for divide by 10 */
1184 if (sig_ratio >= 100)
1185 return 20 + (int)ratio2dB[sig_ratio/10];
1186
1187 /* We shouldn't see this */
1188 if (sig_ratio < 1)
1189 return 0;
1190
1191 /* Use table for ratios 1:1 - 99:1 */
1192 return (int)ratio2dB[sig_ratio];
1193}
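/*
 * [Editor's note -- illustrative usage example, not part of the original
 * driver.]  The table above approximates 20*log10(ratio) for voltage-style
 * ratios.  Worked values: a 25:1 ratio looks up ratio2dB[25] = 28 dB
 * (20*log10(25) ~= 28.0); a 250:1 ratio divides by 10 and adds 20 dB,
 * giving 20 + ratio2dB[25] = 48 dB (20*log10(250) ~= 48.0); anything from
 * 1000:1 up is clamped to 60 dB.  The small stand-alone program below
 * prints the exact values for comparison.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	int ratios[] = { 25, 250, 1000 };
	/* Table/shortcut results from the code above: 28 dB, 48 dB, 60 dB. */
	for (int i = 0; i < 3; i++)
		printf("%4d:1 -> exact %.1f dB\n",
		       ratios[i], 20.0 * log10((double)ratios[i]));
	return 0;
}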
1194
1195/**
1196 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
1197 *
1198 * Uses the priv->rx_handlers callback function array to invoke
1199 * the appropriate handlers, including command responses,
1200 * frame-received notifications, and other notifications.
1201 */
1202static void iwl3945_rx_handle(struct iwl_priv *priv)
1203{
1204 struct iwl_rx_mem_buffer *rxb;
1205 struct iwl_rx_packet *pkt;
1206 struct iwl_rx_queue *rxq = &priv->rxq;
1207 u32 r, i;
1208 int reclaim;
1209 unsigned long flags;
1210 u8 fill_rx = 0;
1211 u32 count = 8;
1212 int total_empty = 0;
1213
1214 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1215 * buffer that the driver may process (last buffer filled by ucode). */
1216 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1217 i = rxq->read;
1218
 1219 /* Calculate how many buffers need to be restocked after handling RX */
1220 total_empty = r - rxq->write_actual;
1221 if (total_empty < 0)
1222 total_empty += RX_QUEUE_SIZE;
1223
1224 if (total_empty > (RX_QUEUE_SIZE / 2))
1225 fill_rx = 1;
1226 /* Rx interrupt, but nothing sent from uCode */
1227 if (i == r)
1228 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
1229
1230 while (i != r) {
1231 int len;
1232
1233 rxb = rxq->queue[i];
1234
1235 /* If an RXB doesn't have a Rx queue slot associated with it,
1236 * then a bug has been introduced in the queue refilling
1237 * routines -- catch it here */
1238 BUG_ON(rxb == NULL);
1239
1240 rxq->queue[i] = NULL;
1241
1242 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1243 PAGE_SIZE << priv->hw_params.rx_page_order,
1244 PCI_DMA_FROMDEVICE);
1245 pkt = rxb_addr(rxb);
1246
1247 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1248 len += sizeof(u32); /* account for status word */
1249 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
1250
1251 /* Reclaim a command buffer only if this packet is a response
1252 * to a (driver-originated) command.
1253 * If the packet (e.g. Rx frame) originated from uCode,
1254 * there is no command buffer to reclaim.
 1255 * uCode should set the SEQ_RX_FRAME bit on ucode-originated packets,
 1256 * but apparently a few don't get it set; catch them here. */
1257 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1258 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
1259 (pkt->hdr.cmd != REPLY_TX);
1260
1261 /* Based on type of command response or notification,
1262 * handle those that need handling via function in
1263 * rx_handlers table. See iwl3945_setup_rx_handlers() */
1264 if (priv->rx_handlers[pkt->hdr.cmd]) {
1265 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1266 iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1267 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1268 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1269 } else {
1270 /* No handling needed */
1271 IWL_DEBUG_RX(priv,
1272 "r %d i %d No handler needed for %s, 0x%02x\n",
1273 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
1274 pkt->hdr.cmd);
1275 }
1276
1277 /*
1278 * XXX: After here, we should always check rxb->page
1279 * against NULL before touching it or its virtual
1280 * memory (pkt). Because some rx_handler might have
1281 * already taken or freed the pages.
1282 */
1283
1284 if (reclaim) {
1285 /* Invoke any callbacks, transfer the buffer to caller,
1286 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
1287 * as we reclaim the driver command queue */
1288 if (rxb->page)
1289 iwl_legacy_tx_cmd_complete(priv, rxb);
1290 else
1291 IWL_WARN(priv, "Claim null rxb?\n");
1292 }
1293
1294 /* Reuse the page if possible. For notification packets and
1295 * SKBs that fail to Rx correctly, add them back into the
1296 * rx_free list for reuse later. */
1297 spin_lock_irqsave(&rxq->lock, flags);
1298 if (rxb->page != NULL) {
1299 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1300 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1301 PCI_DMA_FROMDEVICE);
1302 list_add_tail(&rxb->list, &rxq->rx_free);
1303 rxq->free_count++;
1304 } else
1305 list_add_tail(&rxb->list, &rxq->rx_used);
1306
1307 spin_unlock_irqrestore(&rxq->lock, flags);
1308
1309 i = (i + 1) & RX_QUEUE_MASK;
1310 /* If there are a lot of unused frames,
1311 * restock the Rx queue so ucode won't assert. */
1312 if (fill_rx) {
1313 count++;
1314 if (count >= 8) {
1315 rxq->read = i;
1316 iwl3945_rx_replenish_now(priv);
1317 count = 0;
1318 }
1319 }
1320 }
1321
 1322 /* Remember where the driver left off for the next pass */
1323 rxq->read = i;
1324 if (fill_rx)
1325 iwl3945_rx_replenish_now(priv);
1326 else
1327 iwl3945_rx_queue_restock(priv);
1328}
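/*
 * [Editor's note -- illustrative stand-alone sketch, not part of the original
 * driver.]  Two small pieces of bookkeeping drive the restock policy above:
 * the number of empty slots is the distance from the last restocked position
 * to the firmware's read pointer (with wrap), and when more than half the
 * queue is empty the loop replenishes every 8 handled packets instead of
 * waiting until the end (the counter starts at 8, so the first replenish
 * happens almost immediately).  Restated on plain integers, with an
 * illustrative queue size:
 */
#include <stdbool.h>

#define SKETCH_RXQ_ENTRIES 256    /* illustrative power-of-two queue size */

/* Slots that will need restocking once handling is done. */
int sketch_total_empty(int fw_read_idx, int write_actual)
{
	int total = fw_read_idx - write_actual;

	return (total < 0) ? total + SKETCH_RXQ_ENTRIES : total;
}

/* Mid-loop replenish is only considered when over half the queue is empty. */
bool sketch_more_than_half_empty(int total_empty)
{
	return total_empty > (SKETCH_RXQ_ENTRIES / 2);
}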
1329
1330/* call this function to flush any scheduled tasklet */
1331static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
1332{
 1333 /* wait to make sure we flush the pending tasklet */
1334 synchronize_irq(priv->pci_dev->irq);
1335 tasklet_kill(&priv->irq_tasklet);
1336}
1337
1338static const char *iwl3945_desc_lookup(int i)
1339{
1340 switch (i) {
1341 case 1:
1342 return "FAIL";
1343 case 2:
1344 return "BAD_PARAM";
1345 case 3:
1346 return "BAD_CHECKSUM";
1347 case 4:
1348 return "NMI_INTERRUPT";
1349 case 5:
1350 return "SYSASSERT";
1351 case 6:
1352 return "FATAL_ERROR";
1353 }
1354
1355 return "UNKNOWN";
1356}
1357
1358#define ERROR_START_OFFSET (1 * sizeof(u32))
1359#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1360
1361void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1362{
1363 u32 i;
1364 u32 desc, time, count, base, data1;
1365 u32 blink1, blink2, ilink1, ilink2;
1366
1367 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1368
1369 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1370 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1371 return;
1372 }
1373
1374
1375 count = iwl_legacy_read_targ_mem(priv, base);
1376
1377 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1378 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1379 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1380 priv->status, count);
1381 }
1382
1383 IWL_ERR(priv, "Desc Time asrtPC blink2 "
1384 "ilink1 nmiPC Line\n");
1385 for (i = ERROR_START_OFFSET;
1386 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1387 i += ERROR_ELEM_SIZE) {
1388 desc = iwl_legacy_read_targ_mem(priv, base + i);
1389 time =
1390 iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
1391 blink1 =
1392 iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
1393 blink2 =
1394 iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
1395 ilink1 =
1396 iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
1397 ilink2 =
1398 iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
1399 data1 =
1400 iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
1401
1402 IWL_ERR(priv,
1403 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1404 iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
1405 ilink1, ilink2, data1);
1406 trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
1407 0, blink1, blink2, ilink1, ilink2);
1408 }
1409}
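/*
 * [Editor's note -- illustrative sketch, not part of the original driver.]
 * The dump loop above reads the SRAM error log as one u32 of header (the
 * entry count, hence ERROR_START_OFFSET) followed by 7-u32 records (hence
 * ERROR_ELEM_SIZE).  A struct with the same field order makes the layout
 * explicit; the field comments follow the column headers printed above,
 * and the struct itself is an editor's illustration only -- the driver
 * reads the words individually.
 */
#include <stdint.h>

struct sketch_iwl3945_error_entry {
	uint32_t desc;     /* error type, decoded by iwl3945_desc_lookup() */
	uint32_t time;     /* timestamp word, printed under "Time" */
	uint32_t blink1;   /* printed under "asrtPC" */
	uint32_t blink2;   /* printed under "blink2" */
	uint32_t ilink1;   /* printed under "ilink1" */
	uint32_t ilink2;   /* printed under "nmiPC" */
	uint32_t data1;    /* extra data word, printed under "Line" */
};

struct sketch_iwl3945_error_log {
	uint32_t count;                               /* number of valid entries */
	struct sketch_iwl3945_error_entry entry[];    /* 'count' records follow */
};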
1410
1411#define EVENT_START_OFFSET (6 * sizeof(u32))
1412
1413/**
1414 * iwl3945_print_event_log - Dump error event log to syslog
1415 *
1416 */
1417static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1418 u32 num_events, u32 mode,
1419 int pos, char **buf, size_t bufsz)
1420{
1421 u32 i;
1422 u32 base; /* SRAM byte address of event log header */
1423 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1424 u32 ptr; /* SRAM byte address of log data */
1425 u32 ev, time, data; /* event log data */
1426 unsigned long reg_flags;
1427
1428 if (num_events == 0)
1429 return pos;
1430
1431 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1432
1433 if (mode == 0)
1434 event_size = 2 * sizeof(u32);
1435 else
1436 event_size = 3 * sizeof(u32);
1437
1438 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1439
1440 /* Make sure device is powered up for SRAM reads */
1441 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1442 iwl_grab_nic_access(priv);
1443
1444 /* Set starting address; reads will auto-increment */
1445 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1446 rmb();
1447
1448 /* "time" is actually "data" for mode 0 (no timestamp).
1449 * place event id # at far right for easier visual parsing. */
1450 for (i = 0; i < num_events; i++) {
1451 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1452 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1453 if (mode == 0) {
1454 /* data, ev */
1455 if (bufsz) {
1456 pos += scnprintf(*buf + pos, bufsz - pos,
1457 "0x%08x:%04u\n",
1458 time, ev);
1459 } else {
1460 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1461 trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
1462 time, ev);
1463 }
1464 } else {
1465 data = _iwl_legacy_read_direct32(priv,
1466 HBUS_TARG_MEM_RDAT);
1467 if (bufsz) {
1468 pos += scnprintf(*buf + pos, bufsz - pos,
1469 "%010u:0x%08x:%04u\n",
1470 time, data, ev);
1471 } else {
1472 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
1473 time, data, ev);
1474 trace_iwlwifi_legacy_dev_ucode_event(priv, time,
1475 data, ev);
1476 }
1477 }
1478 }
1479
1480 /* Allow device to power down */
1481 iwl_release_nic_access(priv);
1482 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1483 return pos;
1484}
1485
1486/**
1487 * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
1488 */
1489static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1490 u32 num_wraps, u32 next_entry,
1491 u32 size, u32 mode,
1492 int pos, char **buf, size_t bufsz)
1493{
1494 /*
 1495 * Display the newest entries (DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES by default),
 1496 * i.e. the entries just before the next one that uCode would fill.
1497 */
1498 if (num_wraps) {
1499 if (next_entry < size) {
1500 pos = iwl3945_print_event_log(priv,
1501 capacity - (size - next_entry),
1502 size - next_entry, mode,
1503 pos, buf, bufsz);
1504 pos = iwl3945_print_event_log(priv, 0,
1505 next_entry, mode,
1506 pos, buf, bufsz);
1507 } else
1508 pos = iwl3945_print_event_log(priv, next_entry - size,
1509 size, mode,
1510 pos, buf, bufsz);
1511 } else {
1512 if (next_entry < size)
1513 pos = iwl3945_print_event_log(priv, 0,
1514 next_entry, mode,
1515 pos, buf, bufsz);
1516 else
1517 pos = iwl3945_print_event_log(priv, next_entry - size,
1518 size, mode,
1519 pos, buf, bufsz);
1520 }
1521 return pos;
1522}
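/*
 * [Editor's note -- illustrative stand-alone sketch, not part of the original
 * driver.]  The function above selects the newest 'size' entries out of a
 * circular event log described by (capacity, num_wraps, next_entry).  The
 * helper below restates that selection as data: it fills at most two
 * (start, count) ranges that, printed in order, cover the same window.
 */
#include <stdint.h>

struct sketch_log_range {
	uint32_t start;
	uint32_t count;
};

/* Returns how many of the two ranges are used (1 or 2). */
int sketch_newest_log_ranges(uint32_t capacity, uint32_t num_wraps,
			     uint32_t next_entry, uint32_t size,
			     struct sketch_log_range r[2])
{
	if (num_wraps && next_entry < size) {
		/* Window straddles the wrap point: tail of the log first,
		 * then the beginning up to next_entry. */
		r[0].start = capacity - (size - next_entry);
		r[0].count = size - next_entry;
		r[1].start = 0;
		r[1].count = next_entry;
		return 2;
	}
	/* Otherwise the newest 'size' entries are contiguous,
	 * ending just before next_entry. */
	r[0].start = (next_entry >= size) ? next_entry - size : 0;
	r[0].count = (next_entry >= size) ? size : next_entry;
	return 1;
}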
1523
1524#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
1525
1526int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1527 char **buf, bool display)
1528{
1529 u32 base; /* SRAM byte address of event log header */
1530 u32 capacity; /* event log capacity in # entries */
1531 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1532 u32 num_wraps; /* # times uCode wrapped to top of log */
1533 u32 next_entry; /* index of next entry to be written by uCode */
1534 u32 size; /* # entries that we'll print */
1535 int pos = 0;
1536 size_t bufsz = 0;
1537
1538 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1539 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1540 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1541 return -EINVAL;
1542 }
1543
1544 /* event log header */
1545 capacity = iwl_legacy_read_targ_mem(priv, base);
1546 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
1547 num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
1548 next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
1549
1550 if (capacity > priv->cfg->base_params->max_event_log_size) {
1551 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1552 capacity, priv->cfg->base_params->max_event_log_size);
1553 capacity = priv->cfg->base_params->max_event_log_size;
1554 }
1555
1556 if (next_entry > priv->cfg->base_params->max_event_log_size) {
1557 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1558 next_entry, priv->cfg->base_params->max_event_log_size);
1559 next_entry = priv->cfg->base_params->max_event_log_size;
1560 }
1561
1562 size = num_wraps ? capacity : next_entry;
1563
1564 /* bail out if nothing in log */
1565 if (size == 0) {
1566 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1567 return pos;
1568 }
1569
1570#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1571 if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1572 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1573 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1574#else
1575 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1576 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1577#endif
1578
1579 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
1580 size);
1581
1582#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1583 if (display) {
1584 if (full_log)
1585 bufsz = capacity * 48;
1586 else
1587 bufsz = size * 48;
1588 *buf = kmalloc(bufsz, GFP_KERNEL);
1589 if (!*buf)
1590 return -ENOMEM;
1591 }
1592 if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1593 /* if uCode has wrapped back to top of log,
1594 * start at the oldest entry,
 1595 * i.e. the next one that uCode would fill.
1596 */
1597 if (num_wraps)
1598 pos = iwl3945_print_event_log(priv, next_entry,
1599 capacity - next_entry, mode,
1600 pos, buf, bufsz);
1601
1602 /* (then/else) start at top of log */
1603 pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
1604 pos, buf, bufsz);
1605 } else
1606 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1607 next_entry, size, mode,
1608 pos, buf, bufsz);
1609#else
1610 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1611 next_entry, size, mode,
1612 pos, buf, bufsz);
1613#endif
1614 return pos;
1615}
1616
1617static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1618{
1619 u32 inta, handled = 0;
1620 u32 inta_fh;
1621 unsigned long flags;
1622#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1623 u32 inta_mask;
1624#endif
1625
1626 spin_lock_irqsave(&priv->lock, flags);
1627
1628 /* Ack/clear/reset pending uCode interrupts.
1629 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1630 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
1631 inta = iwl_read32(priv, CSR_INT);
1632 iwl_write32(priv, CSR_INT, inta);
1633
1634 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
1635 * Any new interrupts that happen after this, either while we're
1636 * in this tasklet, or later, will show up in next ISR/tasklet. */
1637 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1638 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
1639
1640#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1641 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
1642 /* just for debug */
1643 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1644 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1645 inta, inta_mask, inta_fh);
1646 }
1647#endif
1648
1649 spin_unlock_irqrestore(&priv->lock, flags);
1650
1651 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1652 * atomic, make sure that inta covers all the interrupts that
1653 * we've discovered, even if FH interrupt came in just after
1654 * reading CSR_INT. */
1655 if (inta_fh & CSR39_FH_INT_RX_MASK)
1656 inta |= CSR_INT_BIT_FH_RX;
1657 if (inta_fh & CSR39_FH_INT_TX_MASK)
1658 inta |= CSR_INT_BIT_FH_TX;
1659
1660 /* Now service all interrupt bits discovered above. */
1661 if (inta & CSR_INT_BIT_HW_ERR) {
1662 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
1663
1664 /* Tell the device to stop sending interrupts */
1665 iwl_legacy_disable_interrupts(priv);
1666
1667 priv->isr_stats.hw++;
1668 iwl_legacy_irq_handle_error(priv);
1669
1670 handled |= CSR_INT_BIT_HW_ERR;
1671
1672 return;
1673 }
1674
1675#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1676 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1677 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1678 if (inta & CSR_INT_BIT_SCD) {
1679 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
1680 "the frame/frames.\n");
1681 priv->isr_stats.sch++;
1682 }
1683
1684 /* Alive notification via Rx interrupt will do the real work */
1685 if (inta & CSR_INT_BIT_ALIVE) {
1686 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
1687 priv->isr_stats.alive++;
1688 }
1689 }
1690#endif
1691 /* Safely ignore these bits for debug checks below */
1692 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1693
1694 /* Error detected by uCode */
1695 if (inta & CSR_INT_BIT_SW_ERR) {
1696 IWL_ERR(priv, "Microcode SW error detected. "
1697 "Restarting 0x%X.\n", inta);
1698 priv->isr_stats.sw++;
1699 iwl_legacy_irq_handle_error(priv);
1700 handled |= CSR_INT_BIT_SW_ERR;
1701 }
1702
1703 /* uCode wakes up after power-down sleep */
1704 if (inta & CSR_INT_BIT_WAKEUP) {
1705 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1706 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1707 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
1708 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
1709 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
1710 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
1711 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
1712 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
1713
1714 priv->isr_stats.wakeup++;
1715 handled |= CSR_INT_BIT_WAKEUP;
1716 }
1717
1718 /* All uCode command responses, including Tx command responses,
1719 * Rx "responses" (frame-received notification), and other
 1720 * notifications from uCode come through here. */
1721 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1722 iwl3945_rx_handle(priv);
1723 priv->isr_stats.rx++;
1724 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1725 }
1726
1727 if (inta & CSR_INT_BIT_FH_TX) {
1728 IWL_DEBUG_ISR(priv, "Tx interrupt\n");
1729 priv->isr_stats.tx++;
1730
1731 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
1732 iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
1733 (FH39_SRVC_CHNL), 0x0);
1734 handled |= CSR_INT_BIT_FH_TX;
1735 }
1736
1737 if (inta & ~handled) {
1738 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1739 priv->isr_stats.unhandled++;
1740 }
1741
1742 if (inta & ~priv->inta_mask) {
1743 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1744 inta & ~priv->inta_mask);
1745 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
1746 }
1747
1748 /* Re-enable all interrupts */
1749 /* only Re-enable if disabled by irq */
1750 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1751 iwl_legacy_enable_interrupts(priv);
1752
1753#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1754 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1755 inta = iwl_read32(priv, CSR_INT);
1756 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1757 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1758 IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1759 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1760 }
1761#endif
1762}
1763
1764static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
1765 struct ieee80211_vif *vif,
1766 enum ieee80211_band band,
1767 struct iwl3945_scan_channel *scan_ch)
1768{
1769 const struct ieee80211_supported_band *sband;
1770 u16 passive_dwell = 0;
1771 u16 active_dwell = 0;
1772 int added = 0;
1773 u8 channel = 0;
1774
1775 sband = iwl_get_hw_mode(priv, band);
1776 if (!sband) {
1777 IWL_ERR(priv, "invalid band\n");
1778 return added;
1779 }
1780
1781 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
1782 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1783
1784 if (passive_dwell <= active_dwell)
1785 passive_dwell = active_dwell + 1;
1786
1787
1788 channel = iwl_legacy_get_single_channel_number(priv, band);
1789
1790 if (channel) {
1791 scan_ch->channel = channel;
1792 scan_ch->type = 0; /* passive */
1793 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1794 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1795 /* Set txpower levels to defaults */
1796 scan_ch->tpc.dsp_atten = 110;
1797 if (band == IEEE80211_BAND_5GHZ)
1798 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
1799 else
1800 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
1801 added++;
1802 } else
1803 IWL_ERR(priv, "no valid channel found\n");
1804 return added;
1805}
1806
1807static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1808 enum ieee80211_band band,
1809 u8 is_active, u8 n_probes,
1810 struct iwl3945_scan_channel *scan_ch,
1811 struct ieee80211_vif *vif)
1812{
1813 struct ieee80211_channel *chan;
1814 const struct ieee80211_supported_band *sband;
1815 const struct iwl_channel_info *ch_info;
1816 u16 passive_dwell = 0;
1817 u16 active_dwell = 0;
1818 int added, i;
1819
1820 sband = iwl_get_hw_mode(priv, band);
1821 if (!sband)
1822 return 0;
1823
1824 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
1825 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1826
1827 if (passive_dwell <= active_dwell)
1828 passive_dwell = active_dwell + 1;
1829
1830 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1831 chan = priv->scan_request->channels[i];
1832
1833 if (chan->band != band)
1834 continue;
1835
1836 scan_ch->channel = chan->hw_value;
1837
1838 ch_info = iwl_legacy_get_channel_info(priv, band,
1839 scan_ch->channel);
1840 if (!iwl_legacy_is_channel_valid(ch_info)) {
1841 IWL_DEBUG_SCAN(priv,
1842 "Channel %d is INVALID for this band.\n",
1843 scan_ch->channel);
1844 continue;
1845 }
1846
1847 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1848 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
 1849 /* If passive, set up for auto-switch
1850 * and use long active_dwell time.
1851 */
1852 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
1853 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1854 scan_ch->type = 0; /* passive */
1855 if (IWL_UCODE_API(priv->ucode_ver) == 1)
1856 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
1857 } else {
1858 scan_ch->type = 1; /* active */
1859 }
1860
1861 /* Set direct probe bits. These may be used both for active
 1862 * scan channels (probes get sent right away)
 1863 * and for passive channels (probes get sent only after
 1864 * hearing a clear Rx packet). */
1865 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
1866 if (n_probes)
1867 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
1868 } else {
1869 /* uCode v1 does not allow setting direct probe bits on
 1870 * a passive channel. */
1871 if ((scan_ch->type & 1) && n_probes)
1872 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
1873 }
1874
1875 /* Set txpower levels to defaults */
1876 scan_ch->tpc.dsp_atten = 110;
1877 /* scan_pwr_info->tpc.dsp_atten; */
1878
1879 /*scan_pwr_info->tpc.tx_gain; */
1880 if (band == IEEE80211_BAND_5GHZ)
1881 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
1882 else {
1883 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
1884 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1885 * power level:
1886 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
1887 */
1888 }
1889
1890 IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
1891 scan_ch->channel,
1892 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
1893 (scan_ch->type & 1) ?
1894 active_dwell : passive_dwell);
1895
1896 scan_ch++;
1897 added++;
1898 }
1899
1900 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1901 return added;
1902}
1903
1904static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1905 struct ieee80211_rate *rates)
1906{
1907 int i;
1908
1909 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1910 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
1911 rates[i].hw_value = i; /* Rate scaling will work on indexes */
1912 rates[i].hw_value_short = i;
1913 rates[i].flags = 0;
1914 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
1915 /*
1916 * If CCK != 1M then set short preamble rate flag.
1917 */
1918 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
1919 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1920 }
1921 }
1922}
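/*
 * [Editor's note -- illustrative sketch, not part of the original driver.]
 * mac80211 expects ieee80211_rate.bitrate in units of 100 kb/s, while the
 * .ieee field of the driver's rate table appears to hold the 802.11
 * "supported rates" value in units of 500 kb/s -- hence the "* 5" above:
 * the 1 Mb/s entry has .ieee == 2, giving bitrate = 10, and the 11 Mb/s
 * entry has .ieee == 22, giving bitrate = 110.  The "plcp == 10" test
 * apparently singles out the 1 Mb/s CCK rate, the only CCK rate that may
 * not advertise IEEE80211_RATE_SHORT_PREAMBLE.  The conversion in
 * isolation:
 */
#include <stdint.h>

/* Convert an 802.11 supported-rates value (500 kb/s units) into the
 * 100 kb/s units used by struct ieee80211_rate: 2 -> 10 (1 Mb/s),
 * 22 -> 110 (11 Mb/s), 108 -> 540 (54 Mb/s). */
uint16_t sketch_ieee_to_bitrate(uint8_t ieee_rate)
{
	return (uint16_t)(ieee_rate * 5);
}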
1923
1924/******************************************************************************
1925 *
1926 * uCode download functions
1927 *
1928 ******************************************************************************/
1929
1930static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
1931{
1932 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1933 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1934 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1935 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1936 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1937 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1938}
1939
1940/**
1941 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
1942 * looking at all data.
1943 */
1944static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
1945{
1946 u32 val;
1947 u32 save_len = len;
1948 int rc = 0;
1949 u32 errcnt;
1950
1951 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1952
1953 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1954 IWL39_RTC_INST_LOWER_BOUND);
1955
1956 errcnt = 0;
1957 for (; len > 0; len -= sizeof(u32), image++) {
1958 /* read data comes through single port, auto-incr addr */
1959 /* NOTE: Use the debugless read so we don't flood kernel log
1960 * if IWL_DL_IO is set */
1961 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1962 if (val != le32_to_cpu(*image)) {
1963 IWL_ERR(priv, "uCode INST section is invalid at "
1964 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1965 save_len - len, val, le32_to_cpu(*image));
1966 rc = -EIO;
1967 errcnt++;
1968 if (errcnt >= 20)
1969 break;
1970 }
1971 }
1972
1973
1974 if (!errcnt)
1975 IWL_DEBUG_INFO(priv,
1976 "ucode image in INSTRUCTION memory is good\n");
1977
1978 return rc;
1979}
1980
1981
1982/**
1983 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
1984 * using sample data 100 bytes apart. If these sample points are good,
1985 * it's a pretty good bet that everything between them is good, too.
1986 */
1987static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1988{
1989 u32 val;
1990 int rc = 0;
1991 u32 errcnt = 0;
1992 u32 i;
1993
1994 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1995
1996 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1997 /* read data comes through single port, auto-incr addr */
1998 /* NOTE: Use the debugless read so we don't flood kernel log
1999 * if IWL_DL_IO is set */
2000 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
2001 i + IWL39_RTC_INST_LOWER_BOUND);
2002 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
2003 if (val != le32_to_cpu(*image)) {
2004#if 0 /* Enable this if you want to see details */
2005 IWL_ERR(priv, "uCode INST section is invalid at "
2006 "offset 0x%x, is 0x%x, s/b 0x%x\n",
2007 i, val, *image);
2008#endif
2009 rc = -EIO;
2010 errcnt++;
2011 if (errcnt >= 3)
2012 break;
2013 }
2014 }
2015
2016 return rc;
2017}
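/*
 * [Editor's note -- illustrative stand-alone sketch, not part of the original
 * driver.]  The two verify routines above differ mainly in stride: _full
 * compares every word of the image against SRAM, while _sparse samples one
 * 32-bit word every 100 bytes and bails out after a few mismatches, which
 * is enough to tell which image is currently loaded.  A generic version of
 * that sampling loop, operating on two plain buffers, looks like this.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Return 0 if 'sram' matches 'image' at every sampled offset, -1 otherwise. */
int sketch_verify_sparse(const uint8_t *sram, const uint8_t *image,
			 size_t len, size_t stride)
{
	size_t errcnt = 0;

	for (size_t i = 0; i + sizeof(uint32_t) <= len; i += stride) {
		if (memcmp(sram + i, image + i, sizeof(uint32_t)) != 0) {
			if (++errcnt >= 3)      /* same early-out count as _sparse */
				return -1;
		}
	}
	return errcnt ? -1 : 0;
}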
2018
2019
2020/**
2021 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
2022 * and verify its contents
2023 */
2024static int iwl3945_verify_ucode(struct iwl_priv *priv)
2025{
2026 __le32 *image;
2027 u32 len;
2028 int rc = 0;
2029
2030 /* Try bootstrap */
2031 image = (__le32 *)priv->ucode_boot.v_addr;
2032 len = priv->ucode_boot.len;
2033 rc = iwl3945_verify_inst_sparse(priv, image, len);
2034 if (rc == 0) {
2035 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
2036 return 0;
2037 }
2038
2039 /* Try initialize */
2040 image = (__le32 *)priv->ucode_init.v_addr;
2041 len = priv->ucode_init.len;
2042 rc = iwl3945_verify_inst_sparse(priv, image, len);
2043 if (rc == 0) {
2044 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
2045 return 0;
2046 }
2047
2048 /* Try runtime/protocol */
2049 image = (__le32 *)priv->ucode_code.v_addr;
2050 len = priv->ucode_code.len;
2051 rc = iwl3945_verify_inst_sparse(priv, image, len);
2052 if (rc == 0) {
2053 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
2054 return 0;
2055 }
2056
2057 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
2058
2059 /* Since nothing seems to match, show first several data entries in
2060 * instruction SRAM, so maybe visual inspection will give a clue.
2061 * Selection of bootstrap image (vs. other images) is arbitrary. */
2062 image = (__le32 *)priv->ucode_boot.v_addr;
2063 len = priv->ucode_boot.len;
2064 rc = iwl3945_verify_inst_full(priv, image, len);
2065
2066 return rc;
2067}
2068
2069static void iwl3945_nic_start(struct iwl_priv *priv)
2070{
2071 /* Remove all resets to allow NIC to operate */
2072 iwl_write32(priv, CSR_RESET, 0);
2073}
2074
2075#define IWL3945_UCODE_GET(item) \
2076static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
2077{ \
2078 return le32_to_cpu(ucode->v1.item); \
2079}
2080
2081static u32 iwl3945_ucode_get_header_size(u32 api_ver)
2082{
2083 return 24;
2084}
2085
2086static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
2087{
2088 return (u8 *) ucode->v1.data;
2089}
2090
2091IWL3945_UCODE_GET(inst_size);
2092IWL3945_UCODE_GET(data_size);
2093IWL3945_UCODE_GET(init_size);
2094IWL3945_UCODE_GET(init_data_size);
2095IWL3945_UCODE_GET(boot_size);
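/*
 * [Editor's note -- not part of the original driver.]  The
 * IWL3945_UCODE_GET() macro above token-pastes its argument into both the
 * accessor name and the v1 header field it reads, so each invocation
 * generates one accessor.  For example, IWL3945_UCODE_GET(inst_size)
 * expands (modulo whitespace) to:
 *
 *	static u32 iwl3945_ucode_get_inst_size(const struct iwl_ucode_header *ucode)
 *	{
 *		return le32_to_cpu(ucode->v1.inst_size);
 *	}
 */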
2096
2097/**
2098 * iwl3945_read_ucode - Read uCode images from disk file.
2099 *
2100 * Copy into buffers for card to fetch via bus-mastering
2101 */
2102static int iwl3945_read_ucode(struct iwl_priv *priv)
2103{
2104 const struct iwl_ucode_header *ucode;
2105 int ret = -EINVAL, index;
2106 const struct firmware *ucode_raw;
2107 /* firmware file name contains uCode/driver compatibility version */
2108 const char *name_pre = priv->cfg->fw_name_pre;
2109 const unsigned int api_max = priv->cfg->ucode_api_max;
2110 const unsigned int api_min = priv->cfg->ucode_api_min;
2111 char buf[25];
2112 u8 *src;
2113 size_t len;
2114 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
2115
2116 /* Ask kernel firmware_class module to get the boot firmware off disk.
2117 * request_firmware() is synchronous, file is in memory on return. */
2118 for (index = api_max; index >= api_min; index--) {
2119 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
2120 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
2121 if (ret < 0) {
2122 IWL_ERR(priv, "%s firmware file req failed: %d\n",
2123 buf, ret);
2124 if (ret == -ENOENT)
2125 continue;
2126 else
2127 goto error;
2128 } else {
2129 if (index < api_max)
2130 IWL_ERR(priv, "Loaded firmware %s, "
2131 "which is deprecated. "
2132 " Please use API v%u instead.\n",
2133 buf, api_max);
2134 IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
2135 "(%zd bytes) from disk\n",
2136 buf, ucode_raw->size);
2137 break;
2138 }
2139 }
2140
2141 if (ret < 0)
2142 goto error;
2143
2144 /* Make sure that we got at least our header! */
2145 if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
2146 IWL_ERR(priv, "File size way too small!\n");
2147 ret = -EINVAL;
2148 goto err_release;
2149 }
2150
2151 /* Data from ucode file: header followed by uCode images */
2152 ucode = (struct iwl_ucode_header *)ucode_raw->data;
2153
2154 priv->ucode_ver = le32_to_cpu(ucode->ver);
2155 api_ver = IWL_UCODE_API(priv->ucode_ver);
2156 inst_size = iwl3945_ucode_get_inst_size(ucode);
2157 data_size = iwl3945_ucode_get_data_size(ucode);
2158 init_size = iwl3945_ucode_get_init_size(ucode);
2159 init_data_size = iwl3945_ucode_get_init_data_size(ucode);
2160 boot_size = iwl3945_ucode_get_boot_size(ucode);
2161 src = iwl3945_ucode_get_data(ucode);
2162
2163 /* api_ver should match the api version forming part of the
2164 * firmware filename ... but we don't check for that and only rely
 2165 * on the API version read from the firmware header from here on */
2166
2167 if (api_ver < api_min || api_ver > api_max) {
2168 IWL_ERR(priv, "Driver unable to support your firmware API. "
2169 "Driver supports v%u, firmware is v%u.\n",
2170 api_max, api_ver);
2171 priv->ucode_ver = 0;
2172 ret = -EINVAL;
2173 goto err_release;
2174 }
2175 if (api_ver != api_max)
2176 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
2177 "got %u. New firmware can be obtained "
2178 "from http://www.intellinuxwireless.org.\n",
2179 api_max, api_ver);
2180
2181 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
2182 IWL_UCODE_MAJOR(priv->ucode_ver),
2183 IWL_UCODE_MINOR(priv->ucode_ver),
2184 IWL_UCODE_API(priv->ucode_ver),
2185 IWL_UCODE_SERIAL(priv->ucode_ver));
2186
2187 snprintf(priv->hw->wiphy->fw_version,
2188 sizeof(priv->hw->wiphy->fw_version),
2189 "%u.%u.%u.%u",
2190 IWL_UCODE_MAJOR(priv->ucode_ver),
2191 IWL_UCODE_MINOR(priv->ucode_ver),
2192 IWL_UCODE_API(priv->ucode_ver),
2193 IWL_UCODE_SERIAL(priv->ucode_ver));
2194
2195 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
2196 priv->ucode_ver);
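	/*
	 * Illustrative decode, assuming the usual one-byte-per-field packing
	 * behind the IWL_UCODE_MAJOR/MINOR/API/SERIAL macros: a raw value of
	 * 0x0f080201 would be reported as version 15.8.2.1 (API v2).
	 */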
2197 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
2198 inst_size);
2199 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
2200 data_size);
2201 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
2202 init_size);
2203 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
2204 init_data_size);
2205 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
2206 boot_size);
2207
2209 /* Verify size of file vs. image size info in file's header */
2210 if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
2211 inst_size + data_size + init_size +
2212 init_data_size + boot_size) {
2213
2214 IWL_DEBUG_INFO(priv,
2215 "uCode file size %zd does not match expected size\n",
2216 ucode_raw->size);
2217 ret = -EINVAL;
2218 goto err_release;
2219 }
2220
2221 /* Verify that uCode images will fit in card's SRAM */
2222 if (inst_size > IWL39_MAX_INST_SIZE) {
2223 IWL_DEBUG_INFO(priv, "uCode instr len %u too large to fit in SRAM\n",
2224 inst_size);
2225 ret = -EINVAL;
2226 goto err_release;
2227 }
2228
2229 if (data_size > IWL39_MAX_DATA_SIZE) {
2230 IWL_DEBUG_INFO(priv, "uCode data len %u too large to fit in SRAM\n",
2231 data_size);
2232 ret = -EINVAL;
2233 goto err_release;
2234 }
2235 if (init_size > IWL39_MAX_INST_SIZE) {
2236 IWL_DEBUG_INFO(priv,
2237 "uCode init instr len %u too large to fit in SRAM\n",
2238 init_size);
2239 ret = -EINVAL;
2240 goto err_release;
2241 }
2242 if (init_data_size > IWL39_MAX_DATA_SIZE) {
2243 IWL_DEBUG_INFO(priv,
2244 "uCode init data len %u too large to fit in SRAM\n",
2245 init_data_size);
2246 ret = -EINVAL;
2247 goto err_release;
2248 }
2249 if (boot_size > IWL39_MAX_BSM_SIZE) {
2250 IWL_DEBUG_INFO(priv,
2251 "uCode boot instr len %u too large to fit in BSM\n",
2252 boot_size);
2253 ret = -EINVAL;
2254 goto err_release;
2255 }
2256
2257 /* Allocate ucode buffers for card's bus-master loading ... */
2258
2259 /* Runtime instructions and 2 copies of data:
2260 * 1) unmodified from disk
2261 * 2) backup cache for save/restore during power-downs */
2262 priv->ucode_code.len = inst_size;
2263 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2264
2265 priv->ucode_data.len = data_size;
2266 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2267
2268 priv->ucode_data_backup.len = data_size;
2269 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2270
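	/*
	 * Each iwl_legacy_alloc_fw_desc() call above fills in desc->v_addr
	 * (the kernel virtual address used for the memcpy()s below) and
	 * desc->p_addr (the bus/DMA address later handed to the BSM by
	 * iwl3945_set_ucode_ptrs()); a NULL v_addr means the allocation
	 * failed, which the check below catches.
	 */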
2271 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2272 !priv->ucode_data_backup.v_addr)
2273 goto err_pci_alloc;
2274
2275 /* Initialization instructions and data */
2276 if (init_size && init_data_size) {
2277 priv->ucode_init.len = init_size;
2278 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2279
2280 priv->ucode_init_data.len = init_data_size;
2281 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2282
2283 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2284 goto err_pci_alloc;
2285 }
2286
2287 /* Bootstrap (instructions only, no data) */
2288 if (boot_size) {
2289 priv->ucode_boot.len = boot_size;
2290 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2291
2292 if (!priv->ucode_boot.v_addr)
2293 goto err_pci_alloc;
2294 }
2295
2296 /* Copy images into buffers for card's bus-master reads ... */
2297
2298 /* Runtime instructions (first block of data in file) */
2299 len = inst_size;
2300 IWL_DEBUG_INFO(priv,
2301 "Copying (but not loading) uCode instr len %zd\n", len);
2302 memcpy(priv->ucode_code.v_addr, src, len);
2303 src += len;
2304
2305 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2306 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
2307
2308 /* Runtime data (2nd block); the backup buffer is filled here and
2309 * refreshed again from this pristine copy in __iwl3945_up() */
2310 len = data_size;
2311 IWL_DEBUG_INFO(priv,
2312 "Copying (but not loading) uCode data len %zd\n", len);
2313 memcpy(priv->ucode_data.v_addr, src, len);
2314 memcpy(priv->ucode_data_backup.v_addr, src, len);
2315 src += len;
2316
2317 /* Initialization instructions (3rd block) */
2318 if (init_size) {
2319 len = init_size;
2320 IWL_DEBUG_INFO(priv,
2321 "Copying (but not loading) init instr len %zd\n", len);
2322 memcpy(priv->ucode_init.v_addr, src, len);
2323 src += len;
2324 }
2325
2326 /* Initialization data (4th block) */
2327 if (init_data_size) {
2328 len = init_data_size;
2329 IWL_DEBUG_INFO(priv,
2330 "Copying (but not loading) init data len %zd\n", len);
2331 memcpy(priv->ucode_init_data.v_addr, src, len);
2332 src += len;
2333 }
2334
2335 /* Bootstrap instructions (5th block) */
2336 len = boot_size;
2337 IWL_DEBUG_INFO(priv,
2338 "Copying (but not loading) boot instr len %zd\n", len);
2339 memcpy(priv->ucode_boot.v_addr, src, len);
2340
2341 /* We have our copies now, allow the OS to release its copy */
2342 release_firmware(ucode_raw);
2343 return 0;
2344
2345 err_pci_alloc:
2346 IWL_ERR(priv, "failed to allocate pci memory\n");
2347 ret = -ENOMEM;
2348 iwl3945_dealloc_ucode_pci(priv);
2349
2350 err_release:
2351 release_firmware(ucode_raw);
2352
2353 error:
2354 return ret;
2355}
2356
2357
2358/**
2359 * iwl3945_set_ucode_ptrs - Set uCode address location
2360 *
2361 * Tell initialization uCode where to find runtime uCode.
2362 *
2363 * BSM registers initially contain pointers to initialization uCode.
2364 * We need to replace them to load runtime uCode inst and data,
2365 * and to save runtime data when powering down.
2366 */
2367static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2368{
2369 dma_addr_t pinst;
2370 dma_addr_t pdata;
2371
2372 /* bits 31:0 for 3945 */
2373 pinst = priv->ucode_code.p_addr;
2374 pdata = priv->ucode_data_backup.p_addr;
2375
2376 /* Tell bootstrap uCode where to find image to load */
2377 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2378 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2379 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2380 priv->ucode_data.len);
2381
2382 /* Inst byte count must be last to set up, bit 31 signals uCode
2383 * that all new ptr/size info is in place */
2384 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2385 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2386
2387 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
2388
2389 return 0;
2390}
2391
2392/**
2393 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
2394 *
2395 * Called after REPLY_ALIVE notification received from "initialize" uCode.
2396 *
2397 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2398 */
2399static void iwl3945_init_alive_start(struct iwl_priv *priv)
2400{
2401 /* Check alive response for "valid" sign from uCode */
2402 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
2403 /* We had an error bringing up the hardware, so take it
2404 * all the way back down so we can try again */
2405 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
2406 goto restart;
2407 }
2408
2409 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2410 * This is a paranoid check, because we would not have gotten the
2411 * "initialize" alive if code weren't properly loaded. */
2412 if (iwl3945_verify_ucode(priv)) {
2413 /* Runtime instruction load was bad;
2414 * take it all the way back down so we can try again */
2415 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
2416 goto restart;
2417 }
2418
2419 /* Send pointers to protocol/runtime uCode image ... init code will
2420 * load and launch runtime uCode, which will send us another "Alive"
2421 * notification. */
2422 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
2423 if (iwl3945_set_ucode_ptrs(priv)) {
2424 /* Runtime instruction load won't happen;
2425 * take it all the way back down so we can try again */
2426 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
2427 goto restart;
2428 }
2429 return;
2430
2431 restart:
2432 queue_work(priv->workqueue, &priv->restart);
2433}
2434
2435/**
2436 * iwl3945_alive_start - called after REPLY_ALIVE notification received
2437 * from protocol/runtime uCode (initialization uCode's
2438 * Alive gets handled by iwl3945_init_alive_start()).
2439 */
2440static void iwl3945_alive_start(struct iwl_priv *priv)
2441{
2442 int thermal_spin = 0;
2443 u32 rfkill;
2444 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2445
2446 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2447
2448 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2449 /* We had an error bringing up the hardware, so take it
2450 * all the way back down so we can try again */
2451 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2452 goto restart;
2453 }
2454
2455 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2456 * This is a paranoid check, because we would not have gotten the
2457 * "runtime" alive if code weren't properly loaded. */
2458 if (iwl3945_verify_ucode(priv)) {
2459 /* Runtime instruction load was bad;
2460 * take it all the way back down so we can try again */
2461 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2462 goto restart;
2463 }
2464
2465 rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
2466 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2467
2468 if (rfkill & 0x1) {
2469 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2470 /* if RFKILL is not on, then wait for thermal
2471 * sensor in adapter to kick in */
2472 while (iwl3945_hw_get_temperature(priv) == 0) {
2473 thermal_spin++;
2474 udelay(10);
2475 }
2476
2477 if (thermal_spin)
2478 IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
2479 thermal_spin * 10);
2480 } else
2481 set_bit(STATUS_RF_KILL_HW, &priv->status);
2482
2483 /* After the ALIVE response, we can send commands to 3945 uCode */
2484 set_bit(STATUS_ALIVE, &priv->status);
2485
2486 /* Enable watchdog to monitor the driver tx queues */
2487 iwl_legacy_setup_watchdog(priv);
2488
2489 if (iwl_legacy_is_rfkill(priv))
2490 return;
2491
2492 ieee80211_wake_queues(priv->hw);
2493
2494 priv->active_rate = IWL_RATES_MASK_3945;
2495
2496 iwl_legacy_power_update_mode(priv, true);
2497
2498 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2499 struct iwl3945_rxon_cmd *active_rxon =
2500 (struct iwl3945_rxon_cmd *)(&ctx->active);
2501
2502 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2503 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2504 } else {
2505 /* Initialize our rx_config data */
2506 iwl_legacy_connection_init_rx_config(priv, ctx);
2507 }
2508
2509 /* Configure Bluetooth device coexistence support */
2510 iwl_legacy_send_bt_config(priv);
2511
2512 set_bit(STATUS_READY, &priv->status);
2513
2514 /* Configure the adapter for unassociated operation */
2515 iwl3945_commit_rxon(priv, ctx);
2516
2517 iwl3945_reg_txpower_periodic(priv);
2518
2519 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2520 wake_up_interruptible(&priv->wait_command_queue);
2521
2522 return;
2523
2524 restart:
2525 queue_work(priv->workqueue, &priv->restart);
2526}
2527
2528static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
2529
2530static void __iwl3945_down(struct iwl_priv *priv)
2531{
2532 unsigned long flags;
2533 int exit_pending;
2534
2535 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2536
2537 iwl_legacy_scan_cancel_timeout(priv, 200);
2538
2539 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2540
2541 /* Stop the TX queue watchdog. STATUS_EXIT_PENDING must already be set
2542 * so that the timer cannot be re-armed */
2543 del_timer_sync(&priv->watchdog);
2544
2545 /* Station information will now be cleared in device */
2546 iwl_legacy_clear_ucode_stations(priv, NULL);
2547 iwl_legacy_dealloc_bcast_stations(priv);
2548 iwl_legacy_clear_driver_stations(priv);
2549
2550 /* Unblock any waiting calls */
2551 wake_up_interruptible_all(&priv->wait_command_queue);
2552
2553 /* Wipe out the EXIT_PENDING status bit if we are not actually
2554 * exiting the module */
2555 if (!exit_pending)
2556 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2557
2558 /* stop and reset the on-board processor */
2559 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2560
2561 /* tell the device to stop sending interrupts */
2562 spin_lock_irqsave(&priv->lock, flags);
2563 iwl_legacy_disable_interrupts(priv);
2564 spin_unlock_irqrestore(&priv->lock, flags);
2565 iwl3945_synchronize_irq(priv);
2566
2567 if (priv->mac80211_registered)
2568 ieee80211_stop_queues(priv->hw);
2569
2570 /* If we have not previously called iwl3945_init() then
2571 * keep only the RF Kill, GEO_CONFIGURED and EXIT_PENDING bits and return */
2572 if (!iwl_legacy_is_init(priv)) {
2573 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2574 STATUS_RF_KILL_HW |
2575 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2576 STATUS_GEO_CONFIGURED |
2577 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2578 STATUS_EXIT_PENDING;
2579 goto exit;
2580 }
2581
2582 /* ...otherwise clear out all status bits except RF Kill, GEO_CONFIGURED,
2583 * FW_ERROR and EXIT_PENDING, and continue taking the NIC down. */
2584 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2585 STATUS_RF_KILL_HW |
2586 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2587 STATUS_GEO_CONFIGURED |
2588 test_bit(STATUS_FW_ERROR, &priv->status) <<
2589 STATUS_FW_ERROR |
2590 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2591 STATUS_EXIT_PENDING;
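	/*
	 * Illustrative sketch only: since test_bit() returns 0 or 1 and the
	 * STATUS_* values are bit numbers, the statement above behaves like
	 *
	 *	priv->status &= BIT(STATUS_RF_KILL_HW) |
	 *			BIT(STATUS_GEO_CONFIGURED) |
	 *			BIT(STATUS_FW_ERROR) |
	 *			BIT(STATUS_EXIT_PENDING);
	 *
	 * i.e. those four bits keep their current values and every other
	 * status bit is cleared.
	 */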
2592
2593 iwl3945_hw_txq_ctx_stop(priv);
2594 iwl3945_hw_rxq_stop(priv);
2595
2596 /* Power-down device's busmaster DMA clocks */
2597 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2598 udelay(5);
2599
2600 /* Stop the device, and put it in low power state */
2601 iwl_legacy_apm_stop(priv);
2602
2603 exit:
2604 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2605
2606 if (priv->beacon_skb)
2607 dev_kfree_skb(priv->beacon_skb);
2608 priv->beacon_skb = NULL;
2609
2610 /* clear out any free frames */
2611 iwl3945_clear_free_frames(priv);
2612}
2613
2614static void iwl3945_down(struct iwl_priv *priv)
2615{
2616 mutex_lock(&priv->mutex);
2617 __iwl3945_down(priv);
2618 mutex_unlock(&priv->mutex);
2619
2620 iwl3945_cancel_deferred_work(priv);
2621}
2622
2623#define MAX_HW_RESTARTS 5
2624
2625static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2626{
2627 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2628 unsigned long flags;
2629 u8 sta_id;
2630
2631 spin_lock_irqsave(&priv->sta_lock, flags);
2632 sta_id = iwl_legacy_prep_station(priv, ctx,
2633 iwlegacy_bcast_addr, false, NULL);
2634 if (sta_id == IWL_INVALID_STATION) {
2635 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2636 spin_unlock_irqrestore(&priv->sta_lock, flags);
2637
2638 return -EINVAL;
2639 }
2640
2641 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
2642 priv->stations[sta_id].used |= IWL_STA_BCAST;
2643 spin_unlock_irqrestore(&priv->sta_lock, flags);
2644
2645 return 0;
2646}
2647
2648static int __iwl3945_up(struct iwl_priv *priv)
2649{
2650 int rc, i;
2651
2652 rc = iwl3945_alloc_bcast_station(priv);
2653 if (rc)
2654 return rc;
2655
2656 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2657 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2658 return -EIO;
2659 }
2660
2661 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2662 IWL_ERR(priv, "ucode not available for device bring up\n");
2663 return -EIO;
2664 }
2665
2666 /* If platform's RF_KILL switch is NOT set to KILL */
2667 if (iwl_read32(priv, CSR_GP_CNTRL) &
2668 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2669 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2670 else {
2671 set_bit(STATUS_RF_KILL_HW, &priv->status);
2672 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2673 return -ENODEV;
2674 }
2675
2676 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2677
2678 rc = iwl3945_hw_nic_init(priv);
2679 if (rc) {
2680 IWL_ERR(priv, "Unable to init NIC\n");
2681 return rc;
2682 }
2683
2684 /* make sure rfkill handshake bits are cleared */
2685 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2686 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2687 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2688
2689 /* clear (again), then enable host interrupts */
2690 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2691 iwl_legacy_enable_interrupts(priv);
2692
2693 /* really make sure rfkill handshake bits are cleared */
2694 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2695 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2696
2697 /* Copy original ucode data image from disk into backup cache.
2698 * This will be used to initialize the on-board processor's
2699 * data SRAM for a clean start when the runtime program first loads. */
2700 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2701 priv->ucode_data.len);
2702
2703 /* We return success when we resume from suspend and rf_kill is on. */
2704 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
2705 return 0;
2706
2707 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2708
2709 /* load bootstrap state machine,
2710 * load bootstrap program into processor's memory,
2711 * prepare to load the "initialize" uCode */
2712 rc = priv->cfg->ops->lib->load_ucode(priv);
2713
2714 if (rc) {
2715 IWL_ERR(priv,
2716 "Unable to set up bootstrap uCode: %d\n", rc);
2717 continue;
2718 }
2719
2720 /* start card; "initialize" will load runtime ucode */
2721 iwl3945_nic_start(priv);
2722
2723 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2724
2725 return 0;
2726 }
2727
2728 set_bit(STATUS_EXIT_PENDING, &priv->status);
2729 __iwl3945_down(priv);
2730 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2731
2732 /* tried to restart and configure the device for as long as our
2733 * patience could withstand */
2734 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2735 return -EIO;
2736}
2737
2738
2739/*****************************************************************************
2740 *
2741 * Workqueue callbacks
2742 *
2743 *****************************************************************************/
2744
2745static void iwl3945_bg_init_alive_start(struct work_struct *data)
2746{
2747 struct iwl_priv *priv =
2748 container_of(data, struct iwl_priv, init_alive_start.work);
2749
2750 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2751 return;
2752
2753 mutex_lock(&priv->mutex);
2754 iwl3945_init_alive_start(priv);
2755 mutex_unlock(&priv->mutex);
2756}
2757
2758static void iwl3945_bg_alive_start(struct work_struct *data)
2759{
2760 struct iwl_priv *priv =
2761 container_of(data, struct iwl_priv, alive_start.work);
2762
2763 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2764 return;
2765
2766 mutex_lock(&priv->mutex);
2767 iwl3945_alive_start(priv);
2768 mutex_unlock(&priv->mutex);
2769}
2770
2771/*
2772 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2773 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2774 * *is* readable even when device has been SW_RESET into low power mode
2775 * (e.g. during RF KILL).
2776 */
2777static void iwl3945_rfkill_poll(struct work_struct *data)
2778{
2779 struct iwl_priv *priv =
2780 container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
2781 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2782 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2783 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2784
2785 if (new_rfkill != old_rfkill) {
2786 if (new_rfkill)
2787 set_bit(STATUS_RF_KILL_HW, &priv->status);
2788 else
2789 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2790
2791 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2792
2793 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2794 new_rfkill ? "disable radio" : "enable radio");
2795 }
2796
2797 /* Keep this running, even if the radio is now enabled. This will be
2798 * cancelled in mac_start() if the system decides to start again */
2799 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2800 round_jiffies_relative(2 * HZ));
2801
2802}
2803
2804int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2805{
2806 struct iwl_host_cmd cmd = {
2807 .id = REPLY_SCAN_CMD,
2808 .len = sizeof(struct iwl3945_scan_cmd),
2809 .flags = CMD_SIZE_HUGE,
2810 };
2811 struct iwl3945_scan_cmd *scan;
2812 u8 n_probes = 0;
2813 enum ieee80211_band band;
2814 bool is_active = false;
2815 int ret;
2816
2817 lockdep_assert_held(&priv->mutex);
2818
2819 if (!priv->scan_cmd) {
2820 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2821 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2822 if (!priv->scan_cmd) {
2823 IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
2824 return -ENOMEM;
2825 }
2826 }
2827 scan = priv->scan_cmd;
2828 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
2829
2830 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
2831 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
2832
2833 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2834 u16 interval = 0;
2835 u32 extra;
2836 u32 suspend_time = 100;
2837 u32 scan_suspend_time = 100;
2838
2839 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
2840
2841 if (priv->is_internal_short_scan)
2842 interval = 0;
2843 else
2844 interval = vif->bss_conf.beacon_int;
2845
2846 scan->suspend_time = 0;
2847 scan->max_out_time = cpu_to_le32(200 * 1024);
2848 if (!interval)
2849 interval = suspend_time;
2850 /*
2851 * suspend time format:
2852 * 0-19: beacon interval in usec (time before exec.)
2853 * 20-23: 0
2854 * 24-31: number of beacons (suspend between channels)
2855 */
2856
2857 extra = (suspend_time / interval) << 24;
2858 scan_suspend_time = 0xFF0FFFFF &
2859 (extra | ((suspend_time % interval) * 1024));
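		/*
		 * Worked example with illustrative numbers: for suspend_time
		 * == 100 and a beacon interval of 100, extra = (100 / 100)
		 * << 24 = 0x01000000 and (100 % 100) * 1024 = 0, so
		 * scan_suspend_time becomes 0x01000000, i.e. suspend for one
		 * beacon plus 0 usec in the bit layout described above.
		 */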
2860
2861 scan->suspend_time = cpu_to_le32(scan_suspend_time);
2862 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
2863 scan_suspend_time, interval);
2864 }
2865
2866 if (priv->is_internal_short_scan) {
2867 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
2868 } else if (priv->scan_request->n_ssids) {
2869 int i, p = 0;
2870 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
2871 for (i = 0; i < priv->scan_request->n_ssids; i++) {
2872 /* always does wildcard anyway */
2873 if (!priv->scan_request->ssids[i].ssid_len)
2874 continue;
2875 scan->direct_scan[p].id = WLAN_EID_SSID;
2876 scan->direct_scan[p].len =
2877 priv->scan_request->ssids[i].ssid_len;
2878 memcpy(scan->direct_scan[p].ssid,
2879 priv->scan_request->ssids[i].ssid,
2880 priv->scan_request->ssids[i].ssid_len);
2881 n_probes++;
2882 p++;
2883 }
2884 is_active = true;
2885 } else
2886 IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
2887
2888 /* We don't build a direct scan probe request; the uCode will do
2889 * that based on the direct_mask added to each channel entry */
2890 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
2891 scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2892 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2893
2894 /* flags + rate selection */
2895
2896 switch (priv->scan_band) {
2897 case IEEE80211_BAND_2GHZ:
2898 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2899 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
2900 band = IEEE80211_BAND_2GHZ;
2901 break;
2902 case IEEE80211_BAND_5GHZ:
2903 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
2904 band = IEEE80211_BAND_5GHZ;
2905 break;
2906 default:
2907 IWL_WARN(priv, "Invalid scan band\n");
2908 return -EIO;
2909 }
2910
2911 /*
2912 * If active scanning is requested but a certain channel
2913 * is marked passive, we can do active scanning if we
2914 * detect transmissions.
2915 */
2916 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2917 IWL_GOOD_CRC_TH_DISABLED;
2918
2919 if (!priv->is_internal_short_scan) {
2920 scan->tx_cmd.len = cpu_to_le16(
2921 iwl_legacy_fill_probe_req(priv,
2922 (struct ieee80211_mgmt *)scan->data,
2923 vif->addr,
2924 priv->scan_request->ie,
2925 priv->scan_request->ie_len,
2926 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2927 } else {
2928 /* use bcast addr, will not be transmitted but must be valid */
2929 scan->tx_cmd.len = cpu_to_le16(
2930 iwl_legacy_fill_probe_req(priv,
2931 (struct ieee80211_mgmt *)scan->data,
2932 iwlegacy_bcast_addr, NULL, 0,
2933 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2934 }
2935 /* select Rx antennas */
2936 scan->flags |= iwl3945_get_antenna_flags(priv);
2937
2938 if (priv->is_internal_short_scan) {
2939 scan->channel_count =
2940 iwl3945_get_single_channel_for_scan(priv, vif, band,
2941 (void *)&scan->data[le16_to_cpu(
2942 scan->tx_cmd.len)]);
2943 } else {
2944 scan->channel_count =
2945 iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
2946 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif);
2947 }
2948
2949 if (scan->channel_count == 0) {
2950 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
2951 return -EIO;
2952 }
2953
2954 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
2955 scan->channel_count * sizeof(struct iwl3945_scan_channel);
2956 cmd.data = scan;
2957 scan->len = cpu_to_le16(cmd.len);
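	/*
	 * At this point the command buffer layout is (sketch): the fixed
	 * struct iwl3945_scan_cmd header, then the probe request template of
	 * tx_cmd.len bytes, then channel_count struct iwl3945_scan_channel
	 * entries; the cmd.len arithmetic above adds exactly those two
	 * variable parts to the base length set when cmd was declared.
	 */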
2958
2959 set_bit(STATUS_SCAN_HW, &priv->status);
2960 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
2961 if (ret)
2962 clear_bit(STATUS_SCAN_HW, &priv->status);
2963 return ret;
2964}
2965
2966void iwl3945_post_scan(struct iwl_priv *priv)
2967{
2968 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2969
2970 /*
2971 * Since setting the RXON may have been deferred while
2972 * performing the scan, fire one off if needed
2973 */
2974 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2975 iwl3945_commit_rxon(priv, ctx);
2976}
2977
2978static void iwl3945_bg_restart(struct work_struct *data)
2979{
2980 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2981
2982 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2983 return;
2984
2985 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2986 struct iwl_rxon_context *ctx;
2987 mutex_lock(&priv->mutex);
2988 for_each_context(priv, ctx)
2989 ctx->vif = NULL;
2990 priv->is_open = 0;
2991 mutex_unlock(&priv->mutex);
2992 iwl3945_down(priv);
2993 ieee80211_restart_hw(priv->hw);
2994 } else {
2995 iwl3945_down(priv);
2996
2997 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2998 return;
2999
3000 mutex_lock(&priv->mutex);
3001 __iwl3945_up(priv);
3002 mutex_unlock(&priv->mutex);
3003 }
3004}
3005
3006static void iwl3945_bg_rx_replenish(struct work_struct *data)
3007{
3008 struct iwl_priv *priv =
3009 container_of(data, struct iwl_priv, rx_replenish);
3010
3011 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3012 return;
3013
3014 mutex_lock(&priv->mutex);
3015 iwl3945_rx_replenish(priv);
3016 mutex_unlock(&priv->mutex);
3017}
3018
3019void iwl3945_post_associate(struct iwl_priv *priv)
3020{
3021 int rc = 0;
3022 struct ieee80211_conf *conf = NULL;
3023 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3024
3025 if (!ctx->vif || !priv->is_open)
3026 return;
3027
3028 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3029 ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
3030
3031 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3032 return;
3033
3034 iwl_legacy_scan_cancel_timeout(priv, 200);
3035
3036 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
3037
3038 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3039 iwl3945_commit_rxon(priv, ctx);
3040
3041 rc = iwl_legacy_send_rxon_timing(priv, ctx);
3042 if (rc)
3043 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3044 "Attempting to continue.\n");
3045
3046 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3047
3048 ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
3049
3050 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3051 ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
3052
3053 if (ctx->vif->bss_conf.use_short_preamble)
3054 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3055 else
3056 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3057
3058 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3059 if (ctx->vif->bss_conf.use_short_slot)
3060 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3061 else
3062 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3063 }
3064
3065 iwl3945_commit_rxon(priv, ctx);
3066
3067 switch (ctx->vif->type) {
3068 case NL80211_IFTYPE_STATION:
3069 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
3070 break;
3071 case NL80211_IFTYPE_ADHOC:
3072 iwl3945_send_beacon_cmd(priv);
3073 break;
3074 default:
3075 IWL_ERR(priv, "%s Should not be called in %d mode\n",
3076 __func__, ctx->vif->type);
3077 break;
3078 }
3079}
3080
3081/*****************************************************************************
3082 *
3083 * mac80211 entry point functions
3084 *
3085 *****************************************************************************/
3086
3087#define UCODE_READY_TIMEOUT (2 * HZ)
3088
3089static int iwl3945_mac_start(struct ieee80211_hw *hw)
3090{
3091 struct iwl_priv *priv = hw->priv;
3092 int ret;
3093
3094 IWL_DEBUG_MAC80211(priv, "enter\n");
3095
3096 /* we should be verifying the device is ready to be opened */
3097 mutex_lock(&priv->mutex);
3098
3099 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
3100 * ucode filename and max sizes are card-specific. */
3101
3102 if (!priv->ucode_code.len) {
3103 ret = iwl3945_read_ucode(priv);
3104 if (ret) {
3105 IWL_ERR(priv, "Could not read microcode: %d\n", ret);
3106 mutex_unlock(&priv->mutex);
3107 goto out_release_irq;
3108 }
3109 }
3110
3111 ret = __iwl3945_up(priv);
3112
3113 mutex_unlock(&priv->mutex);
3114
3115 if (ret)
3116 goto out_release_irq;
3117
3118 IWL_DEBUG_INFO(priv, "Start UP work.\n");
3119
3120 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
3121 * mac80211 will not be run successfully. */
3122 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
3123 test_bit(STATUS_READY, &priv->status),
3124 UCODE_READY_TIMEOUT);
3125 if (!ret) {
3126 if (!test_bit(STATUS_READY, &priv->status)) {
3127 IWL_ERR(priv,
3128 "Wait for START_ALIVE timeout after %dms.\n",
3129 jiffies_to_msecs(UCODE_READY_TIMEOUT));
3130 ret = -ETIMEDOUT;
3131 goto out_release_irq;
3132 }
3133 }
3134
3135 /* ucode is running and will send rfkill notifications,
3136 * no need to poll the killswitch state anymore */
3137 cancel_delayed_work(&priv->_3945.rfkill_poll);
3138
3139 priv->is_open = 1;
3140 IWL_DEBUG_MAC80211(priv, "leave\n");
3141 return 0;
3142
3143out_release_irq:
3144 priv->is_open = 0;
3145 IWL_DEBUG_MAC80211(priv, "leave - failed\n");
3146 return ret;
3147}
3148
3149static void iwl3945_mac_stop(struct ieee80211_hw *hw)
3150{
3151 struct iwl_priv *priv = hw->priv;
3152
3153 IWL_DEBUG_MAC80211(priv, "enter\n");
3154
3155 if (!priv->is_open) {
3156 IWL_DEBUG_MAC80211(priv, "leave - skip\n");
3157 return;
3158 }
3159
3160 priv->is_open = 0;
3161
3162 iwl3945_down(priv);
3163
3164 flush_workqueue(priv->workqueue);
3165
3166 /* start polling the killswitch state again */
3167 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
3168 round_jiffies_relative(2 * HZ));
3169
3170 IWL_DEBUG_MAC80211(priv, "leave\n");
3171}
3172
3173static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3174{
3175 struct iwl_priv *priv = hw->priv;
3176
3177 IWL_DEBUG_MAC80211(priv, "enter\n");
3178
3179 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
3180 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
3181
3182 if (iwl3945_tx_skb(priv, skb))
3183 dev_kfree_skb_any(skb);
3184
3185 IWL_DEBUG_MAC80211(priv, "leave\n");
3186}
3187
3188void iwl3945_config_ap(struct iwl_priv *priv)
3189{
3190 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3191 struct ieee80211_vif *vif = ctx->vif;
3192 int rc = 0;
3193
3194 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3195 return;
3196
3197 /* The following should be done only at AP bring up */
3198 if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
3199
3200 /* RXON - unassoc (to set timing command) */
3201 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3202 iwl3945_commit_rxon(priv, ctx);
3203
3204 /* RXON Timing */
3205 rc = iwl_legacy_send_rxon_timing(priv, ctx);
3206 if (rc)
3207 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3208 "Attempting to continue.\n");
3209
3210 ctx->staging.assoc_id = 0;
3211
3212 if (vif->bss_conf.use_short_preamble)
3213 ctx->staging.flags |=
3214 RXON_FLG_SHORT_PREAMBLE_MSK;
3215 else
3216 ctx->staging.flags &=
3217 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3218
3219 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3220 if (vif->bss_conf.use_short_slot)
3221 ctx->staging.flags |=
3222 RXON_FLG_SHORT_SLOT_MSK;
3223 else
3224 ctx->staging.flags &=
3225 ~RXON_FLG_SHORT_SLOT_MSK;
3226 }
3227 /* restore RXON assoc */
3228 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3229 iwl3945_commit_rxon(priv, ctx);
3230 }
3231 iwl3945_send_beacon_cmd(priv);
3232}
3233
3234static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3235 struct ieee80211_vif *vif,
3236 struct ieee80211_sta *sta,
3237 struct ieee80211_key_conf *key)
3238{
3239 struct iwl_priv *priv = hw->priv;
3240 int ret = 0;
3241 u8 sta_id = IWL_INVALID_STATION;
3242 u8 static_key;
3243
3244 IWL_DEBUG_MAC80211(priv, "enter\n");
3245
3246 if (iwl3945_mod_params.sw_crypto) {
3247 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
3248 return -EOPNOTSUPP;
3249 }
3250
3251 /*
3252 * To support IBSS RSN, don't program group keys in IBSS; the
3253 * hardware will then not attempt to decrypt the frames.
3254 */
3255 if (vif->type == NL80211_IFTYPE_ADHOC &&
3256 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
3257 return -EOPNOTSUPP;
3258
3259 static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
3260
3261 if (!static_key) {
3262 sta_id = iwl_legacy_sta_id_or_broadcast(
3263 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
3264 if (sta_id == IWL_INVALID_STATION)
3265 return -EINVAL;
3266 }
3267
3268 mutex_lock(&priv->mutex);
3269 iwl_legacy_scan_cancel_timeout(priv, 100);
3270
3271 switch (cmd) {
3272 case SET_KEY:
3273 if (static_key)
3274 ret = iwl3945_set_static_key(priv, key);
3275 else
3276 ret = iwl3945_set_dynamic_key(priv, key, sta_id);
3277 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
3278 break;
3279 case DISABLE_KEY:
3280 if (static_key)
3281 ret = iwl3945_remove_static_key(priv);
3282 else
3283 ret = iwl3945_clear_sta_key_info(priv, sta_id);
3284 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
3285 break;
3286 default:
3287 ret = -EINVAL;
3288 }
3289
3290 mutex_unlock(&priv->mutex);
3291 IWL_DEBUG_MAC80211(priv, "leave\n");
3292
3293 return ret;
3294}
3295
3296static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3297 struct ieee80211_vif *vif,
3298 struct ieee80211_sta *sta)
3299{
3300 struct iwl_priv *priv = hw->priv;
3301 struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
3302 int ret;
3303 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3304 u8 sta_id;
3305
3306 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3307 sta->addr);
3308 mutex_lock(&priv->mutex);
3309 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
3310 sta->addr);
3311 sta_priv->common.sta_id = IWL_INVALID_STATION;
3312
3313
3314 ret = iwl_legacy_add_station_common(priv,
3315 &priv->contexts[IWL_RXON_CTX_BSS],
3316 sta->addr, is_ap, sta, &sta_id);
3317 if (ret) {
3318 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3319 sta->addr, ret);
3320 /* Should we return success if return code is EEXIST ? */
3321 mutex_unlock(&priv->mutex);
3322 return ret;
3323 }
3324
3325 sta_priv->common.sta_id = sta_id;
3326
3327 /* Initialize rate scaling */
3328 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3329 sta->addr);
3330 iwl3945_rs_rate_init(priv, sta, sta_id);
3331 mutex_unlock(&priv->mutex);
3332
3333 return 0;
3334}
3335
3336static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3337 unsigned int changed_flags,
3338 unsigned int *total_flags,
3339 u64 multicast)
3340{
3341 struct iwl_priv *priv = hw->priv;
3342 __le32 filter_or = 0, filter_nand = 0;
3343 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3344
3345#define CHK(test, flag) do { \
3346 if (*total_flags & (test)) \
3347 filter_or |= (flag); \
3348 else \
3349 filter_nand |= (flag); \
3350 } while (0)
3351
3352 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
3353 changed_flags, *total_flags);
3354
3355 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3356 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
3357 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3358
3359#undef CHK
3360
3361 mutex_lock(&priv->mutex);
3362
3363 ctx->staging.filter_flags &= ~filter_nand;
3364 ctx->staging.filter_flags |= filter_or;
3365
3366 /*
3367 * Not committing directly because the hardware may be performing a scan;
3368 * even when the hw is ready, committing here breaks for some reason.
3369 * We'll eventually commit the filter flags change anyway.
3370 */
3371
3372 mutex_unlock(&priv->mutex);
3373
3374 /*
3375 * Receiving all multicast frames is always enabled by the
3376 * default flags setup in iwl_legacy_connection_init_rx_config()
3377 * since we currently do not support programming multicast
3378 * filters into the device.
3379 */
3380 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3381 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3382}
3383
3384
3385/*****************************************************************************
3386 *
3387 * sysfs attributes
3388 *
3389 *****************************************************************************/
3390
3391#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3392
3393/*
3394 * The following adds a new attribute to the sysfs representation
3395 * of this device driver (i.e. a new debug_level file in the device's
3396 * sysfs directory) used for controlling the debug level.
3397 *
3398 * See the IWL_DL_* level definitions in iwl-debug.h for details.
3399 *
3400 * The debug_level being managed using sysfs below is a per device debug
3401 * level that is used instead of the global debug level if it (the per
3402 * device debug level) is set.
3403 */
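/*
 * Illustrative usage from user space (the path assumes a device at PCI
 * address 0000:03:00.0; the attribute is created in the device's sysfs
 * directory by sysfs_create_group() in iwl3945_pci_probe() below):
 *
 *	# cat /sys/bus/pci/devices/0000:03:00.0/debug_level
 *	# echo 0x47 > /sys/bus/pci/devices/0000:03:00.0/debug_level
 *
 * The value is a hex or decimal bitmask of IWL_DL_* debug flags.
 */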
3404static ssize_t iwl3945_show_debug_level(struct device *d,
3405 struct device_attribute *attr, char *buf)
3406{
3407 struct iwl_priv *priv = dev_get_drvdata(d);
3408 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
3409}
3410static ssize_t iwl3945_store_debug_level(struct device *d,
3411 struct device_attribute *attr,
3412 const char *buf, size_t count)
3413{
3414 struct iwl_priv *priv = dev_get_drvdata(d);
3415 unsigned long val;
3416 int ret;
3417
3418 ret = strict_strtoul(buf, 0, &val);
3419 if (ret)
3420 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
3421 else {
3422 priv->debug_level = val;
3423 if (iwl_legacy_alloc_traffic_mem(priv))
3424 IWL_ERR(priv,
3425 "Not enough memory to generate traffic log\n");
3426 }
3427 return strnlen(buf, count);
3428}
3429
3430static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
3431 iwl3945_show_debug_level, iwl3945_store_debug_level);
3432
3433#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
3434
3435static ssize_t iwl3945_show_temperature(struct device *d,
3436 struct device_attribute *attr, char *buf)
3437{
3438 struct iwl_priv *priv = dev_get_drvdata(d);
3439
3440 if (!iwl_legacy_is_alive(priv))
3441 return -EAGAIN;
3442
3443 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3444}
3445
3446static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
3447
3448static ssize_t iwl3945_show_tx_power(struct device *d,
3449 struct device_attribute *attr, char *buf)
3450{
3451 struct iwl_priv *priv = dev_get_drvdata(d);
3452 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3453}
3454
3455static ssize_t iwl3945_store_tx_power(struct device *d,
3456 struct device_attribute *attr,
3457 const char *buf, size_t count)
3458{
3459 struct iwl_priv *priv = dev_get_drvdata(d);
3460 char *p = (char *)buf;
3461 u32 val;
3462
3463 val = simple_strtoul(p, &p, 10);
3464 if (p == buf)
3465 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
3466 else
3467 iwl3945_hw_reg_set_txpower(priv, val);
3468
3469 return count;
3470}
3471
3472static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
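/*
 * Illustrative usage (sketch): `echo 14 > tx_power` asks the driver to apply
 * a new user TX power limit via iwl3945_hw_reg_set_txpower(), while reading
 * the attribute returns the current tx_power_user_lmt value.
 */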
3473
3474static ssize_t iwl3945_show_flags(struct device *d,
3475 struct device_attribute *attr, char *buf)
3476{
3477 struct iwl_priv *priv = dev_get_drvdata(d);
3478 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3479
3480 return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags));
3481}
3482
3483static ssize_t iwl3945_store_flags(struct device *d,
3484 struct device_attribute *attr,
3485 const char *buf, size_t count)
3486{
3487 struct iwl_priv *priv = dev_get_drvdata(d);
3488 u32 flags = simple_strtoul(buf, NULL, 0);
3489 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3490
3491 mutex_lock(&priv->mutex);
3492 if (le32_to_cpu(ctx->staging.flags) != flags) {
3493 /* Cancel any currently running scans... */
3494 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3495 IWL_WARN(priv, "Could not cancel scan.\n");
3496 else {
3497 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
3498 flags);
3499 ctx->staging.flags = cpu_to_le32(flags);
3500 iwl3945_commit_rxon(priv, ctx);
3501 }
3502 }
3503 mutex_unlock(&priv->mutex);
3504
3505 return count;
3506}
3507
3508static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
3509
3510static ssize_t iwl3945_show_filter_flags(struct device *d,
3511 struct device_attribute *attr, char *buf)
3512{
3513 struct iwl_priv *priv = dev_get_drvdata(d);
3514 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3515
3516 return sprintf(buf, "0x%04X\n",
3517 le32_to_cpu(ctx->active.filter_flags));
3518}
3519
3520static ssize_t iwl3945_store_filter_flags(struct device *d,
3521 struct device_attribute *attr,
3522 const char *buf, size_t count)
3523{
3524 struct iwl_priv *priv = dev_get_drvdata(d);
3525 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3526 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3527
3528 mutex_lock(&priv->mutex);
3529 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3530 /* Cancel any currently running scans... */
3531 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3532 IWL_WARN(priv, "Could not cancel scan.\n");
3533 else {
3534 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3535 "0x%04X\n", filter_flags);
3536 ctx->staging.filter_flags =
3537 cpu_to_le32(filter_flags);
3538 iwl3945_commit_rxon(priv, ctx);
3539 }
3540 }
3541 mutex_unlock(&priv->mutex);
3542
3543 return count;
3544}
3545
3546static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
3547 iwl3945_store_filter_flags);
3548
3549static ssize_t iwl3945_show_measurement(struct device *d,
3550 struct device_attribute *attr, char *buf)
3551{
3552 struct iwl_priv *priv = dev_get_drvdata(d);
3553 struct iwl_spectrum_notification measure_report;
3554 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3555 u8 *data = (u8 *)&measure_report;
3556 unsigned long flags;
3557
3558 spin_lock_irqsave(&priv->lock, flags);
3559 if (!(priv->measurement_status & MEASUREMENT_READY)) {
3560 spin_unlock_irqrestore(&priv->lock, flags);
3561 return 0;
3562 }
3563 memcpy(&measure_report, &priv->measure_report, size);
3564 priv->measurement_status = 0;
3565 spin_unlock_irqrestore(&priv->lock, flags);
3566
3567 while (size && (PAGE_SIZE - len)) {
3568 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3569 PAGE_SIZE - len, 1);
3570 len = strlen(buf);
3571 if (PAGE_SIZE - len)
3572 buf[len++] = '\n';
3573
3574 ofs += 16;
3575 size -= min(size, 16U);
3576 }
3577
3578 return len;
3579}
3580
3581static ssize_t iwl3945_store_measurement(struct device *d,
3582 struct device_attribute *attr,
3583 const char *buf, size_t count)
3584{
3585 struct iwl_priv *priv = dev_get_drvdata(d);
3586 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3587 struct ieee80211_measurement_params params = {
3588 .channel = le16_to_cpu(ctx->active.channel),
3589 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3590 .duration = cpu_to_le16(1),
3591 };
3592 u8 type = IWL_MEASURE_BASIC;
3593 u8 buffer[32];
3594 u8 channel;
3595
3596 if (count) {
3597 char *p = buffer;
3598 strlcpy(buffer, buf, sizeof(buffer)); /* guarantee NUL termination */
3599 channel = simple_strtoul(p, NULL, 0);
3600 if (channel)
3601 params.channel = channel;
3602
3603 p = buffer;
3604 while (*p && *p != ' ')
3605 p++;
3606 if (*p)
3607 type = simple_strtoul(p + 1, NULL, 0);
3608 }
3609
3610 IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
3611 "channel %d (for '%s')\n", type, params.channel, buf);
3612 iwl3945_get_measurement(priv, &params, type);
3613
3614 return count;
3615}
3616
3617static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3618 iwl3945_show_measurement, iwl3945_store_measurement);
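/*
 * Illustrative usage (sketch): writing e.g. "6" to this attribute requests a
 * basic measurement on channel 6, and an optional second number after a
 * space overrides the measurement type; reading the attribute returns a hex
 * dump of the most recent measurement report, or nothing if none is ready.
 */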
3619
3620static ssize_t iwl3945_store_retry_rate(struct device *d,
3621 struct device_attribute *attr,
3622 const char *buf, size_t count)
3623{
3624 struct iwl_priv *priv = dev_get_drvdata(d);
3625
3626 priv->retry_rate = simple_strtoul(buf, NULL, 0);
3627 if (priv->retry_rate <= 0)
3628 priv->retry_rate = 1;
3629
3630 return count;
3631}
3632
3633static ssize_t iwl3945_show_retry_rate(struct device *d,
3634 struct device_attribute *attr, char *buf)
3635{
3636 struct iwl_priv *priv = dev_get_drvdata(d);
3637 return sprintf(buf, "%d", priv->retry_rate);
3638}
3639
3640static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
3641 iwl3945_store_retry_rate);
3642
3643
3644static ssize_t iwl3945_show_channels(struct device *d,
3645 struct device_attribute *attr, char *buf)
3646{
3647 /* none of this belongs in sysfs anyway */
3648 return 0;
3649}
3650
3651static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
3652
3653static ssize_t iwl3945_show_antenna(struct device *d,
3654 struct device_attribute *attr, char *buf)
3655{
3656 struct iwl_priv *priv = dev_get_drvdata(d);
3657
3658 if (!iwl_legacy_is_alive(priv))
3659 return -EAGAIN;
3660
3661 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3662}
3663
3664static ssize_t iwl3945_store_antenna(struct device *d,
3665 struct device_attribute *attr,
3666 const char *buf, size_t count)
3667{
3668 struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
3669 int ant;
3670
3671 if (count == 0)
3672 return 0;
3673
3674 if (sscanf(buf, "%1i", &ant) != 1) {
3675 IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
3676 return count;
3677 }
3678
3679 if ((ant >= 0) && (ant <= 2)) {
3680 IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
3681 iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
3682 } else
3683 IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
3684
3686 return count;
3687}
3688
3689static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
3690
3691static ssize_t iwl3945_show_status(struct device *d,
3692 struct device_attribute *attr, char *buf)
3693{
3694 struct iwl_priv *priv = dev_get_drvdata(d);
3695 if (!iwl_legacy_is_alive(priv))
3696 return -EAGAIN;
3697 return sprintf(buf, "0x%08x\n", (int)priv->status);
3698}
3699
3700static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
3701
3702static ssize_t iwl3945_dump_error_log(struct device *d,
3703 struct device_attribute *attr,
3704 const char *buf, size_t count)
3705{
3706 struct iwl_priv *priv = dev_get_drvdata(d);
3707 char *p = (char *)buf;
3708
3709 if (p[0] == '1')
3710 iwl3945_dump_nic_error_log(priv);
3711
3712 return strnlen(buf, count);
3713}
3714
3715static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
3716
3717/*****************************************************************************
3718 *
3719 * driver setup and tear down
3720 *
3721 *****************************************************************************/
3722
3723static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3724{
3725 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3726
3727 init_waitqueue_head(&priv->wait_command_queue);
3728
3729 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3730 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3731 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3732 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3733 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3734
3735 iwl_legacy_setup_scan_deferred_work(priv);
3736
3737 iwl3945_hw_setup_deferred_work(priv);
3738
3739 init_timer(&priv->watchdog);
3740 priv->watchdog.data = (unsigned long)priv;
3741 priv->watchdog.function = iwl_legacy_bg_watchdog;
3742
3743 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3744 iwl3945_irq_tasklet, (unsigned long)priv);
3745}
3746
3747static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3748{
3749 iwl3945_hw_cancel_deferred_work(priv);
3750
3751 cancel_delayed_work_sync(&priv->init_alive_start);
3752 cancel_delayed_work(&priv->alive_start);
3753
3754 iwl_legacy_cancel_scan_deferred_work(priv);
3755}
3756
3757static struct attribute *iwl3945_sysfs_entries[] = {
3758 &dev_attr_antenna.attr,
3759 &dev_attr_channels.attr,
3760 &dev_attr_dump_errors.attr,
3761 &dev_attr_flags.attr,
3762 &dev_attr_filter_flags.attr,
3763 &dev_attr_measurement.attr,
3764 &dev_attr_retry_rate.attr,
3765 &dev_attr_status.attr,
3766 &dev_attr_temperature.attr,
3767 &dev_attr_tx_power.attr,
3768#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3769 &dev_attr_debug_level.attr,
3770#endif
3771 NULL
3772};
3773
3774static struct attribute_group iwl3945_attribute_group = {
3775 .name = NULL, /* put in device directory */
3776 .attrs = iwl3945_sysfs_entries,
3777};
3778
3779struct ieee80211_ops iwl3945_hw_ops = {
3780 .tx = iwl3945_mac_tx,
3781 .start = iwl3945_mac_start,
3782 .stop = iwl3945_mac_stop,
3783 .add_interface = iwl_legacy_mac_add_interface,
3784 .remove_interface = iwl_legacy_mac_remove_interface,
3785 .change_interface = iwl_legacy_mac_change_interface,
3786 .config = iwl_legacy_mac_config,
3787 .configure_filter = iwl3945_configure_filter,
3788 .set_key = iwl3945_mac_set_key,
3789 .conf_tx = iwl_legacy_mac_conf_tx,
3790 .reset_tsf = iwl_legacy_mac_reset_tsf,
3791 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
3792 .hw_scan = iwl_legacy_mac_hw_scan,
3793 .sta_add = iwl3945_mac_sta_add,
3794 .sta_remove = iwl_legacy_mac_sta_remove,
3795 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
3796};
3797
3798static int iwl3945_init_drv(struct iwl_priv *priv)
3799{
3800 int ret;
3801 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
3802
3803 priv->retry_rate = 1;
3804 priv->beacon_skb = NULL;
3805
3806 spin_lock_init(&priv->sta_lock);
3807 spin_lock_init(&priv->hcmd_lock);
3808
3809 INIT_LIST_HEAD(&priv->free_frames);
3810
3811 mutex_init(&priv->mutex);
3812 mutex_init(&priv->sync_cmd_mutex);
3813
3814 priv->ieee_channels = NULL;
3815 priv->ieee_rates = NULL;
3816 priv->band = IEEE80211_BAND_2GHZ;
3817
3818 priv->iw_mode = NL80211_IFTYPE_STATION;
3819 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3820
3821 /* initialize force reset */
3822 priv->force_reset[IWL_RF_RESET].reset_duration =
3823 IWL_DELAY_NEXT_FORCE_RF_RESET;
3824 priv->force_reset[IWL_FW_RESET].reset_duration =
3825 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3826
3827
3828 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
3829 priv->tx_power_next = IWL_DEFAULT_TX_POWER;
3830
3831 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3832 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
3833 eeprom->version);
3834 ret = -EINVAL;
3835 goto err;
3836 }
3837 ret = iwl_legacy_init_channel_map(priv);
3838 if (ret) {
3839 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3840 goto err;
3841 }
3842
3843 /* Set up txpower settings in driver for all channels */
3844 if (iwl3945_txpower_set_from_eeprom(priv)) {
3845 ret = -EIO;
3846 goto err_free_channel_map;
3847 }
3848
3849 ret = iwl_legacy_init_geos(priv);
3850 if (ret) {
3851 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3852 goto err_free_channel_map;
3853 }
3854 iwl3945_init_hw_rates(priv, priv->ieee_rates);
3855
3856 return 0;
3857
3858err_free_channel_map:
3859 iwl_legacy_free_channel_map(priv);
3860err:
3861 return ret;
3862}
3863
3864#define IWL3945_MAX_PROBE_REQUEST 200
3865
3866static int iwl3945_setup_mac(struct iwl_priv *priv)
3867{
3868 int ret;
3869 struct ieee80211_hw *hw = priv->hw;
3870
3871 hw->rate_control_algorithm = "iwl-3945-rs";
3872 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
3873 hw->vif_data_size = sizeof(struct iwl_vif_priv);
3874
3875 /* Tell mac80211 our characteristics */
3876 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3877 IEEE80211_HW_SPECTRUM_MGMT;
3878
3879 hw->wiphy->interface_modes =
3880 priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
3881
3882 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3883 WIPHY_FLAG_DISABLE_BEACON_HINTS |
3884 WIPHY_FLAG_IBSS_RSN;
3885
3886 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3887 /* we create the 802.11 header and a zero-length SSID element */
3888 hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
3889
3890 /* Default value; 4 EDCA QOS priorities */
3891 hw->queues = 4;
3892
3893 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
3894 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
3895 &priv->bands[IEEE80211_BAND_2GHZ];
3896
3897 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
3898 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3899 &priv->bands[IEEE80211_BAND_5GHZ];
3900
3901 iwl_legacy_leds_init(priv);
3902
3903 ret = ieee80211_register_hw(priv->hw);
3904 if (ret) {
3905 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
3906 return ret;
3907 }
3908 priv->mac80211_registered = 1;
3909
3910 return 0;
3911}
3912
3913static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3914{
3915 int err = 0, i;
3916 struct iwl_priv *priv;
3917 struct ieee80211_hw *hw;
3918 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3919 struct iwl3945_eeprom *eeprom;
3920 unsigned long flags;
3921
3922 /***********************
3923 * 1. Allocating HW data
3924 * ********************/
3925
3926 /* mac80211 allocates memory for this device instance, including
3927 * space for this driver's private structure */
3928 hw = iwl_legacy_alloc_all(cfg);
3929 if (hw == NULL) {
3930 pr_err("Can not allocate network device\n");
3931 err = -ENOMEM;
3932 goto out;
3933 }
3934 priv = hw->priv;
3935 SET_IEEE80211_DEV(hw, &pdev->dev);
3936
3937 priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
3938
3939 /* 3945 has only one valid context */
3940 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3941
3942 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3943 priv->contexts[i].ctxid = i;
3944
3945 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3946 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3947 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3948 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3949 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3950 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3951 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3952 BIT(NL80211_IFTYPE_STATION) |
3953 BIT(NL80211_IFTYPE_ADHOC);
3954 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3955 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3956 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3957
3958 /*
3959 * Disabling hardware scan means that mac80211 will perform scans
3960 * "the hard way", rather than using device's scan.
3961 */
3962 if (iwl3945_mod_params.disable_hw_scan) {
3963 dev_printk(KERN_DEBUG, &(pdev->dev),
3964 "sw scan support is deprecated\n");
3965 iwl3945_hw_ops.hw_scan = NULL;
3966 }
3967
3968 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3969 priv->cfg = cfg;
3970 priv->pci_dev = pdev;
3971 priv->inta_mask = CSR_INI_SET_MASK;
3972
3973 if (iwl_legacy_alloc_traffic_mem(priv))
3974 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3975
3976 /***************************
3977 * 2. Initializing PCI bus
3978 * *************************/
3979 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3980 PCIE_LINK_STATE_CLKPM);
3981
3982 if (pci_enable_device(pdev)) {
3983 err = -ENODEV;
3984 goto out_ieee80211_free_hw;
3985 }
3986
3987 pci_set_master(pdev);
3988
3989 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3990 if (!err)
3991 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3992 if (err) {
3993 IWL_WARN(priv, "No suitable DMA available.\n");
3994 goto out_pci_disable_device;
3995 }
3996
3997 pci_set_drvdata(pdev, priv);
3998 err = pci_request_regions(pdev, DRV_NAME);
3999 if (err)
4000 goto out_pci_disable_device;
4001
4002 /***********************
4003 * 3. Read REV Register
4004 * ********************/
4005 priv->hw_base = pci_iomap(pdev, 0, 0);
4006 if (!priv->hw_base) {
4007 err = -ENODEV;
4008 goto out_pci_release_regions;
4009 }
4010
4011 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
4012 (unsigned long long) pci_resource_len(pdev, 0));
4013 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
4014
4015 /* We disable the RETRY_TIMEOUT register (0x41) to keep
4016 * PCI Tx retries from interfering with C3 CPU state */
4017 pci_write_config_byte(pdev, 0x41, 0x00);
4018
4019	/* These spin locks are used by apm_ops.init and EEPROM access,
4020	 * so initialize them now.
4021 */
4022 spin_lock_init(&priv->reg_lock);
4023 spin_lock_init(&priv->lock);
4024
4025 /*
4026 * stop and reset the on-board processor just in case it is in a
4027 * strange state ... like being left stranded by a primary kernel
4028 * and this is now the kdump kernel trying to start up
4029 */
4030 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4031
4032 /***********************
4033 * 4. Read EEPROM
4034 * ********************/
4035
4036 /* Read the EEPROM */
4037 err = iwl_legacy_eeprom_init(priv);
4038 if (err) {
4039 IWL_ERR(priv, "Unable to init EEPROM\n");
4040 goto out_iounmap;
4041 }
4042 /* MAC Address location in EEPROM same for 3945/4965 */
4043 eeprom = (struct iwl3945_eeprom *)priv->eeprom;
4044 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
4045 SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
4046
4047 /***********************
4048 * 5. Setup HW Constants
4049 * ********************/
4050 /* Device-specific setup */
4051 if (iwl3945_hw_set_hw_params(priv)) {
4052 IWL_ERR(priv, "failed to set hw settings\n");
4053 goto out_eeprom_free;
4054 }
4055
4056 /***********************
4057 * 6. Setup priv
4058 * ********************/
4059
4060 err = iwl3945_init_drv(priv);
4061 if (err) {
4062 IWL_ERR(priv, "initializing driver failed\n");
4063 goto out_unset_hw_params;
4064 }
4065
4066 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
4067 priv->cfg->name);
4068
4069 /***********************
4070 * 7. Setup Services
4071 * ********************/
4072
4073 spin_lock_irqsave(&priv->lock, flags);
4074 iwl_legacy_disable_interrupts(priv);
4075 spin_unlock_irqrestore(&priv->lock, flags);
4076
4077 pci_enable_msi(priv->pci_dev);
4078
4079 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
4080 IRQF_SHARED, DRV_NAME, priv);
4081 if (err) {
4082 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
4083 goto out_disable_msi;
4084 }
4085
4086 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4087 if (err) {
4088 IWL_ERR(priv, "failed to create sysfs device attributes\n");
4089 goto out_release_irq;
4090 }
4091
4092 iwl_legacy_set_rxon_channel(priv,
4093 &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
4094 &priv->contexts[IWL_RXON_CTX_BSS]);
4095 iwl3945_setup_deferred_work(priv);
4096 iwl3945_setup_rx_handlers(priv);
4097 iwl_legacy_power_initialize(priv);
4098
4099 /*********************************
4100 * 8. Setup and Register mac80211
4101 * *******************************/
4102
4103 iwl_legacy_enable_interrupts(priv);
4104
4105 err = iwl3945_setup_mac(priv);
4106 if (err)
4107 goto out_remove_sysfs;
4108
4109 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
4110 if (err)
4111 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
4112
4113 /* Start monitoring the killswitch */
4114 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
4115 2 * HZ);
4116
4117 return 0;
4118
4119 out_remove_sysfs:
4120 destroy_workqueue(priv->workqueue);
4121 priv->workqueue = NULL;
4122 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4123 out_release_irq:
4124 free_irq(priv->pci_dev->irq, priv);
4125 out_disable_msi:
4126 pci_disable_msi(priv->pci_dev);
4127 iwl_legacy_free_geos(priv);
4128 iwl_legacy_free_channel_map(priv);
4129 out_unset_hw_params:
4130 iwl3945_unset_hw_params(priv);
4131 out_eeprom_free:
4132 iwl_legacy_eeprom_free(priv);
4133 out_iounmap:
4134 pci_iounmap(pdev, priv->hw_base);
4135 out_pci_release_regions:
4136 pci_release_regions(pdev);
4137 out_pci_disable_device:
4138 pci_set_drvdata(pdev, NULL);
4139 pci_disable_device(pdev);
4140 out_ieee80211_free_hw:
4141 iwl_legacy_free_traffic_mem(priv);
4142 ieee80211_free_hw(priv->hw);
4143 out:
4144 return err;
4145}
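
The probe above follows the kernel's staged-initialization idiom: each numbered step that can fail jumps to a label that unwinds only what has already been set up. A minimal, self-contained sketch of that control flow (user-space C; demo_probe() and its resources are hypothetical, not the driver's):

#include <stdio.h>
#include <stdlib.h>

static int demo_probe(void)
{
	void *dma_buf = NULL, *irq_cookie = NULL;
	int err;

	dma_buf = malloc(64);		/* stage 1: allocate a resource */
	if (!dma_buf) {
		err = -1;
		goto out;
	}

	irq_cookie = malloc(16);	/* stage 2: another resource */
	if (!irq_cookie) {
		err = -2;
		goto out_free_buf;	/* undo stage 1 only */
	}

	/* More stages would follow; each failure jumps to the label that
	 * unwinds exactly what has been set up so far. A real probe keeps
	 * the resources until remove(); they are freed here for brevity. */
	free(irq_cookie);
	free(dma_buf);
	return 0;

out_free_buf:
	free(dma_buf);
out:
	return err;
}

int main(void)
{
	printf("demo_probe() = %d\n", demo_probe());
	return 0;
}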
4146
4147static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4148{
4149 struct iwl_priv *priv = pci_get_drvdata(pdev);
4150 unsigned long flags;
4151
4152 if (!priv)
4153 return;
4154
4155 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
4156
4157 iwl_legacy_dbgfs_unregister(priv);
4158
4159 set_bit(STATUS_EXIT_PENDING, &priv->status);
4160
4161 iwl_legacy_leds_exit(priv);
4162
4163 if (priv->mac80211_registered) {
4164 ieee80211_unregister_hw(priv->hw);
4165 priv->mac80211_registered = 0;
4166 } else {
4167 iwl3945_down(priv);
4168 }
4169
4170 /*
4171 * Make sure device is reset to low power before unloading driver.
4172 * This may be redundant with iwl_down(), but there are paths to
4173 * run iwl_down() without calling apm_ops.stop(), and there are
4174 * paths to avoid running iwl_down() at all before leaving driver.
4175 * This (inexpensive) call *makes sure* device is reset.
4176 */
4177 iwl_legacy_apm_stop(priv);
4178
4179 /* make sure we flush any pending irq or
4180 * tasklet for the driver
4181 */
4182 spin_lock_irqsave(&priv->lock, flags);
4183 iwl_legacy_disable_interrupts(priv);
4184 spin_unlock_irqrestore(&priv->lock, flags);
4185
4186 iwl3945_synchronize_irq(priv);
4187
4188 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4189
4190 cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
4191
4192 iwl3945_dealloc_ucode_pci(priv);
4193
4194 if (priv->rxq.bd)
4195 iwl3945_rx_queue_free(priv, &priv->rxq);
4196 iwl3945_hw_txq_ctx_free(priv);
4197
4198 iwl3945_unset_hw_params(priv);
4199
4200 /*netif_stop_queue(dev); */
4201 flush_workqueue(priv->workqueue);
4202
4203 /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
4204 * priv->workqueue... so we can't take down the workqueue
4205 * until now... */
4206 destroy_workqueue(priv->workqueue);
4207 priv->workqueue = NULL;
4208 iwl_legacy_free_traffic_mem(priv);
4209
4210 free_irq(pdev->irq, priv);
4211 pci_disable_msi(pdev);
4212
4213 pci_iounmap(pdev, priv->hw_base);
4214 pci_release_regions(pdev);
4215 pci_disable_device(pdev);
4216 pci_set_drvdata(pdev, NULL);
4217
4218 iwl_legacy_free_channel_map(priv);
4219 iwl_legacy_free_geos(priv);
4220 kfree(priv->scan_cmd);
4221 if (priv->beacon_skb)
4222 dev_kfree_skb(priv->beacon_skb);
4223
4224 ieee80211_free_hw(priv->hw);
4225}
4226
4227
4228/*****************************************************************************
4229 *
4230 * driver and module entry point
4231 *
4232 *****************************************************************************/
4233
4234static struct pci_driver iwl3945_driver = {
4235 .name = DRV_NAME,
4236 .id_table = iwl3945_hw_card_ids,
4237 .probe = iwl3945_pci_probe,
4238 .remove = __devexit_p(iwl3945_pci_remove),
4239 .driver.pm = IWL_LEGACY_PM_OPS,
4240};
4241
4242static int __init iwl3945_init(void)
4243{
4244
4245 int ret;
4246 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
4247 pr_info(DRV_COPYRIGHT "\n");
4248
4249 ret = iwl3945_rate_control_register();
4250 if (ret) {
4251 pr_err("Unable to register rate control algorithm: %d\n", ret);
4252 return ret;
4253 }
4254
4255 ret = pci_register_driver(&iwl3945_driver);
4256 if (ret) {
4257 pr_err("Unable to initialize PCI module\n");
4258 goto error_register;
4259 }
4260
4261 return ret;
4262
4263error_register:
4264 iwl3945_rate_control_unregister();
4265 return ret;
4266}
4267
4268static void __exit iwl3945_exit(void)
4269{
4270 pci_unregister_driver(&iwl3945_driver);
4271 iwl3945_rate_control_unregister();
4272}
4273
4274MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
4275
4276module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4277MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4278module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
4279MODULE_PARM_DESC(swcrypto,
4280 "using software crypto (default 1 [software])");
4281module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4282 int, S_IRUGO);
4283MODULE_PARM_DESC(disable_hw_scan,
4284 "disable hardware scanning (default 0) (deprecated)");
4285#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
4286module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
4287MODULE_PARM_DESC(debug, "debug output mask");
4288#endif
4289module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4290MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
4291
4292module_exit(iwl3945_exit);
4293module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
new file mode 100644
index 000000000000..91b3d8b9d7a5
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -0,0 +1,3632 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/wireless.h>
44#include <linux/firmware.h>
45#include <linux/etherdevice.h>
46#include <linux/if_arp.h>
47
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl4965"
53
54#include "iwl-eeprom.h"
55#include "iwl-dev.h"
56#include "iwl-core.h"
57#include "iwl-io.h"
58#include "iwl-helpers.h"
59#include "iwl-sta.h"
60#include "iwl-4965-calib.h"
61#include "iwl-4965.h"
62#include "iwl-4965-led.h"
63
64
65/******************************************************************************
66 *
67 * module boiler plate
68 *
69 ******************************************************************************/
70
71/*
72 * module name, copyright, version, etc.
73 */
74#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
75
76#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
77#define VD "d"
78#else
79#define VD
80#endif
81
82#define DRV_VERSION IWLWIFI_VERSION VD
83
84
85MODULE_DESCRIPTION(DRV_DESCRIPTION);
86MODULE_VERSION(DRV_VERSION);
87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
88MODULE_LICENSE("GPL");
89MODULE_ALIAS("iwl4965");
90
91void iwl4965_update_chain_flags(struct iwl_priv *priv)
92{
93 struct iwl_rxon_context *ctx;
94
95 if (priv->cfg->ops->hcmd->set_rxon_chain) {
96 for_each_context(priv, ctx) {
97 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
98 if (ctx->active.rx_chain != ctx->staging.rx_chain)
99 iwl_legacy_commit_rxon(priv, ctx);
100 }
101 }
102}
103
104static void iwl4965_clear_free_frames(struct iwl_priv *priv)
105{
106 struct list_head *element;
107
108 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
109 priv->frames_count);
110
111 while (!list_empty(&priv->free_frames)) {
112 element = priv->free_frames.next;
113 list_del(element);
114 kfree(list_entry(element, struct iwl_frame, list));
115 priv->frames_count--;
116 }
117
118 if (priv->frames_count) {
119 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
120 priv->frames_count);
121 priv->frames_count = 0;
122 }
123}
124
125static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
126{
127 struct iwl_frame *frame;
128 struct list_head *element;
129 if (list_empty(&priv->free_frames)) {
130 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
131 if (!frame) {
132 IWL_ERR(priv, "Could not allocate frame!\n");
133 return NULL;
134 }
135
136 priv->frames_count++;
137 return frame;
138 }
139
140 element = priv->free_frames.next;
141 list_del(element);
142 return list_entry(element, struct iwl_frame, list);
143}
144
145static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
146{
147 memset(frame, 0, sizeof(*frame));
148 list_add(&frame->list, &priv->free_frames);
149}
150
151static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
152 struct ieee80211_hdr *hdr,
153 int left)
154{
155 lockdep_assert_held(&priv->mutex);
156
157 if (!priv->beacon_skb)
158 return 0;
159
160 if (priv->beacon_skb->len > left)
161 return 0;
162
163 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
164
165 return priv->beacon_skb->len;
166}
167
168/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
169static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
170 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
171 u8 *beacon, u32 frame_size)
172{
173 u16 tim_idx;
174 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
175
176 /*
177 * The index is relative to frame start but we start looking at the
178 * variable-length part of the beacon.
179 */
180 tim_idx = mgmt->u.beacon.variable - beacon;
181
182 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
183 while ((tim_idx < (frame_size - 2)) &&
184 (beacon[tim_idx] != WLAN_EID_TIM))
185 tim_idx += beacon[tim_idx+1] + 2;
186
187 /* If TIM field was found, set variables */
188 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
189 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
190 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
191 } else
192 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
193}
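
iwl4965_set_beacon_tim() above walks the beacon's variable-length information elements, each laid out as an id byte, a length byte, and that many payload bytes. A minimal stand-alone sketch of the same walk (find_ie(), EID_TIM and the sample buffer are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define EID_TIM 5	/* same numeric value as WLAN_EID_TIM */

/* Return the offset of the element with the given id, or -1 if not found. */
static int find_ie(const uint8_t *buf, size_t len, size_t start, uint8_t id)
{
	size_t idx = start;

	while (idx + 2 <= len) {
		if (buf[idx] == id)
			return (int)idx;
		idx += 2 + buf[idx + 1];	/* skip id, length, payload */
	}
	return -1;
}

int main(void)
{
	/* SSID element (id 0, 3 bytes), then TIM element (id 5, 4 bytes) */
	const uint8_t ies[] = { 0, 3, 'f', 'o', 'o',  5, 4, 0, 1, 0, 0 };
	int idx = find_ie(ies, sizeof(ies), 0, EID_TIM);

	if (idx >= 0)
		printf("TIM at offset %d, size %u\n", idx, ies[idx + 1]);
	return 0;
}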
194
195static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
196 struct iwl_frame *frame)
197{
198 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
199 u32 frame_size;
200 u32 rate_flags;
201 u32 rate;
202 /*
203 * We have to set up the TX command, the TX Beacon command, and the
204 * beacon contents.
205 */
206
207 lockdep_assert_held(&priv->mutex);
208
209 if (!priv->beacon_ctx) {
210 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
211 return 0;
212 }
213
214 /* Initialize memory */
215 tx_beacon_cmd = &frame->u.beacon;
216 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
217
218 /* Set up TX beacon contents */
219 frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
220 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
221 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
222 return 0;
223 if (!frame_size)
224 return 0;
225
226 /* Set up TX command fields */
227 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
228 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
229 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
230 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
231 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
232
233 /* Set up TX beacon command fields */
234 iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
235 frame_size);
236
237 /* Set up packet rate and flags */
238 rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
239 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
240 priv->hw_params.valid_tx_ant);
241 rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
242 if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
243 rate_flags |= RATE_MCS_CCK_MSK;
244 tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
245 rate_flags);
246
247 return sizeof(*tx_beacon_cmd) + frame_size;
248}
249
250int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
251{
252 struct iwl_frame *frame;
253 unsigned int frame_size;
254 int rc;
255
256 frame = iwl4965_get_free_frame(priv);
257 if (!frame) {
258 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
259 "command.\n");
260 return -ENOMEM;
261 }
262
263 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
264 if (!frame_size) {
265 IWL_ERR(priv, "Error configuring the beacon command\n");
266 iwl4965_free_frame(priv, frame);
267 return -EINVAL;
268 }
269
270 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
271 &frame->u.cmd[0]);
272
273 iwl4965_free_frame(priv, frame);
274
275 return rc;
276}
277
278static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
279{
280 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
281
282 dma_addr_t addr = get_unaligned_le32(&tb->lo);
283 if (sizeof(dma_addr_t) > sizeof(u32))
284 addr |=
285 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
286
287 return addr;
288}
289
290static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
291{
292 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
293
294 return le16_to_cpu(tb->hi_n_len) >> 4;
295}
296
297static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
298 dma_addr_t addr, u16 len)
299{
300 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
301 u16 hi_n_len = len << 4;
302
303 put_unaligned_le32(addr, &tb->lo);
304 if (sizeof(dma_addr_t) > sizeof(u32))
305 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
306
307 tb->hi_n_len = cpu_to_le16(hi_n_len);
308
309 tfd->num_tbs = idx + 1;
310}
311
312static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
313{
314 return tfd->num_tbs & 0x1f;
315}
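
As read from the helpers above, a TFD buffer descriptor packs the low 32 address bits into 'lo', address bits 32..35 into the low nibble of 'hi_n_len', and the 12-bit length into its upper bits. A stand-alone sketch of that round trip (the demo_* names are hypothetical; endianness handling is omitted):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_tb {
	uint32_t lo;
	uint16_t hi_n_len;
};

static void demo_set_tb(struct demo_tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;			/* low 32 address bits */
	tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | (len << 4));
}

static uint64_t demo_get_addr(const struct demo_tb *tb)
{
	return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t demo_get_len(const struct demo_tb *tb)
{
	return tb->hi_n_len >> 4;			/* 12-bit length field */
}

int main(void)
{
	struct demo_tb tb;
	uint64_t addr = 0xFABCDE012ULL;			/* a 36-bit address */

	demo_set_tb(&tb, addr, 128);
	assert(demo_get_addr(&tb) == addr);
	assert(demo_get_len(&tb) == 128);
	printf("addr 0x%llx, len %u\n",
	       (unsigned long long)demo_get_addr(&tb), demo_get_len(&tb));
	return 0;
}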
316
317/**
318 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
319 * @priv - driver private data
320 * @txq - tx queue
321 *
322 * Does NOT advance any TFD circular buffer read/write indexes
323 * Does NOT free the TFD itself (which is within circular buffer)
324 */
325void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
326{
327 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
328 struct iwl_tfd *tfd;
329 struct pci_dev *dev = priv->pci_dev;
330 int index = txq->q.read_ptr;
331 int i;
332 int num_tbs;
333
334 tfd = &tfd_tmp[index];
335
336 /* Sanity check on number of chunks */
337 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
338
339 if (num_tbs >= IWL_NUM_OF_TBS) {
340 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
341		/* @todo issue fatal error, it is quite a serious situation */
342 return;
343 }
344
345 /* Unmap tx_cmd */
346 if (num_tbs)
347 pci_unmap_single(dev,
348 dma_unmap_addr(&txq->meta[index], mapping),
349 dma_unmap_len(&txq->meta[index], len),
350 PCI_DMA_BIDIRECTIONAL);
351
352 /* Unmap chunks, if any. */
353 for (i = 1; i < num_tbs; i++)
354 pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
355 iwl4965_tfd_tb_get_len(tfd, i),
356 PCI_DMA_TODEVICE);
357
358 /* free SKB */
359 if (txq->txb) {
360 struct sk_buff *skb;
361
362 skb = txq->txb[txq->q.read_ptr].skb;
363
364 /* can be called from irqs-disabled context */
365 if (skb) {
366 dev_kfree_skb_any(skb);
367 txq->txb[txq->q.read_ptr].skb = NULL;
368 }
369 }
370}
371
372int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
373 struct iwl_tx_queue *txq,
374 dma_addr_t addr, u16 len,
375 u8 reset, u8 pad)
376{
377 struct iwl_queue *q;
378 struct iwl_tfd *tfd, *tfd_tmp;
379 u32 num_tbs;
380
381 q = &txq->q;
382 tfd_tmp = (struct iwl_tfd *)txq->tfds;
383 tfd = &tfd_tmp[q->write_ptr];
384
385 if (reset)
386 memset(tfd, 0, sizeof(*tfd));
387
388 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
389
390	/* Each TFD can point to a maximum of 20 Tx buffers */
391 if (num_tbs >= IWL_NUM_OF_TBS) {
392 IWL_ERR(priv, "Error can not send more than %d chunks\n",
393 IWL_NUM_OF_TBS);
394 return -EINVAL;
395 }
396
397 BUG_ON(addr & ~DMA_BIT_MASK(36));
398 if (unlikely(addr & ~IWL_TX_DMA_MASK))
399 IWL_ERR(priv, "Unaligned address = %llx\n",
400 (unsigned long long)addr);
401
402 iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
403
404 return 0;
405}
406
407/*
408 * Tell nic where to find circular buffer of Tx Frame Descriptors for
409 * given Tx queue, and enable the DMA channel used for that queue.
410 *
411 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
412 * channels supported in hardware.
413 */
414int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
415 struct iwl_tx_queue *txq)
416{
417 int txq_id = txq->q.id;
418
419 /* Circular buffer (TFD queue in DRAM) physical base address */
420 iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
421 txq->q.dma_addr >> 8);
422
423 return 0;
424}
425
426/******************************************************************************
427 *
428 * Generic RX handler implementations
429 *
430 ******************************************************************************/
431static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
432 struct iwl_rx_mem_buffer *rxb)
433{
434 struct iwl_rx_packet *pkt = rxb_addr(rxb);
435 struct iwl_alive_resp *palive;
436 struct delayed_work *pwork;
437
438 palive = &pkt->u.alive_frame;
439
440 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
441 "0x%01X 0x%01X\n",
442 palive->is_valid, palive->ver_type,
443 palive->ver_subtype);
444
445 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
446 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
447 memcpy(&priv->card_alive_init,
448 &pkt->u.alive_frame,
449 sizeof(struct iwl_init_alive_resp));
450 pwork = &priv->init_alive_start;
451 } else {
452 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
453 memcpy(&priv->card_alive, &pkt->u.alive_frame,
454 sizeof(struct iwl_alive_resp));
455 pwork = &priv->alive_start;
456 }
457
458 /* We delay the ALIVE response by 5ms to
459 * give the HW RF Kill time to activate... */
460 if (palive->is_valid == UCODE_VALID_OK)
461 queue_delayed_work(priv->workqueue, pwork,
462 msecs_to_jiffies(5));
463 else
464 IWL_WARN(priv, "uCode did not respond OK.\n");
465}
466
467/**
468 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
469 *
470 * This callback is provided in order to send a statistics request.
471 *
472 * This timer function is continually reset to execute within
473 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
474 * was received. We need to ensure we receive the statistics in order
475 * to update the temperature used for calibrating the TXPOWER.
476 */
477static void iwl4965_bg_statistics_periodic(unsigned long data)
478{
479 struct iwl_priv *priv = (struct iwl_priv *)data;
480
481 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
482 return;
483
484	/* don't send host command if rf-kill is on */
485 if (!iwl_legacy_is_ready_rf(priv))
486 return;
487
488 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
489}
490
491
492static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
493 u32 start_idx, u32 num_events,
494 u32 mode)
495{
496 u32 i;
497 u32 ptr; /* SRAM byte address of log data */
498 u32 ev, time, data; /* event log data */
499 unsigned long reg_flags;
500
501 if (mode == 0)
502 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
503 else
504 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
505
506 /* Make sure device is powered up for SRAM reads */
507 spin_lock_irqsave(&priv->reg_lock, reg_flags);
508 if (iwl_grab_nic_access(priv)) {
509 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
510 return;
511 }
512
513 /* Set starting address; reads will auto-increment */
514 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
515 rmb();
516
517 /*
518 * "time" is actually "data" for mode 0 (no timestamp).
519	 * Place the event id # at the far right for easier visual parsing.
520 */
521 for (i = 0; i < num_events; i++) {
522 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
523 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
524 if (mode == 0) {
525 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
526 0, time, ev);
527 } else {
528 data = _iwl_legacy_read_direct32(priv,
529 HBUS_TARG_MEM_RDAT);
530 trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
531 time, data, ev);
532 }
533 }
534 /* Allow device to power down */
535 iwl_release_nic_access(priv);
536 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
537}
538
539static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
540{
541 u32 capacity; /* event log capacity in # entries */
542 u32 base; /* SRAM byte address of event log header */
543 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
544 u32 num_wraps; /* # times uCode wrapped to top of log */
545 u32 next_entry; /* index of next entry to be written by uCode */
546
547 if (priv->ucode_type == UCODE_INIT)
548 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
549 else
550 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
551 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
552 capacity = iwl_legacy_read_targ_mem(priv, base);
553 num_wraps = iwl_legacy_read_targ_mem(priv,
554 base + (2 * sizeof(u32)));
555 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
556 next_entry = iwl_legacy_read_targ_mem(priv,
557 base + (3 * sizeof(u32)));
558 } else
559 return;
560
561 if (num_wraps == priv->event_log.num_wraps) {
562 iwl4965_print_cont_event_trace(priv,
563 base, priv->event_log.next_entry,
564 next_entry - priv->event_log.next_entry,
565 mode);
566 priv->event_log.non_wraps_count++;
567 } else {
568 if ((num_wraps - priv->event_log.num_wraps) > 1)
569 priv->event_log.wraps_more_count++;
570 else
571 priv->event_log.wraps_once_count++;
572 trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
573 num_wraps - priv->event_log.num_wraps,
574 next_entry, priv->event_log.next_entry);
575 if (next_entry < priv->event_log.next_entry) {
576 iwl4965_print_cont_event_trace(priv, base,
577 priv->event_log.next_entry,
578 capacity - priv->event_log.next_entry,
579 mode);
580
581 iwl4965_print_cont_event_trace(priv, base, 0,
582 next_entry, mode);
583 } else {
584 iwl4965_print_cont_event_trace(priv, base,
585 next_entry, capacity - next_entry,
586 mode);
587
588 iwl4965_print_cont_event_trace(priv, base, 0,
589 next_entry, mode);
590 }
591 }
592 priv->event_log.num_wraps = num_wraps;
593 priv->event_log.next_entry = next_entry;
594}
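
iwl4965_continuous_event_trace() above dumps the new portion of a ring-buffer log either as one contiguous range (no wrap since the last visit) or as two ranges, the tail of the buffer followed by its head. A simplified sketch of that segmentation (dump_range() merely prints and stands in for iwl4965_print_cont_event_trace()):

#include <stdio.h>

static void dump_range(unsigned start, unsigned count)
{
	printf("dump %u entries starting at index %u\n", count, start);
}

static void dump_new_entries(unsigned capacity,
			     unsigned old_idx, unsigned old_wraps,
			     unsigned new_idx, unsigned new_wraps)
{
	if (new_wraps == old_wraps) {
		dump_range(old_idx, new_idx - old_idx);		/* one range */
	} else {
		dump_range(old_idx, capacity - old_idx);	/* tail */
		dump_range(0, new_idx);				/* head */
	}
}

int main(void)
{
	dump_new_entries(128, 100, 3, 120, 3);	/* no wrap: one range */
	dump_new_entries(128, 100, 3, 20, 4);	/* wrapped: two ranges */
	return 0;
}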
595
596/**
597 * iwl4965_bg_ucode_trace - Timer callback to log ucode event
598 *
599 * The timer is continually set to execute every
600 * UCODE_TRACE_PERIOD milliseconds after the last timer expired.
601 * This function performs the continuous uCode event logging operation,
602 * if enabled.
603 */
604static void iwl4965_bg_ucode_trace(unsigned long data)
605{
606 struct iwl_priv *priv = (struct iwl_priv *)data;
607
608 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
609 return;
610
611 if (priv->event_log.ucode_trace) {
612 iwl4965_continuous_event_trace(priv);
613 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
614 mod_timer(&priv->ucode_trace,
615 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
616 }
617}
618
619static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
620 struct iwl_rx_mem_buffer *rxb)
621{
622 struct iwl_rx_packet *pkt = rxb_addr(rxb);
623 struct iwl4965_beacon_notif *beacon =
624 (struct iwl4965_beacon_notif *)pkt->u.raw;
625#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
626 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
627
628 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
629 "tsf %d %d rate %d\n",
630 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
631 beacon->beacon_notify_hdr.failure_frame,
632 le32_to_cpu(beacon->ibss_mgr_status),
633 le32_to_cpu(beacon->high_tsf),
634 le32_to_cpu(beacon->low_tsf), rate);
635#endif
636
637 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
638}
639
640static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
641{
642 unsigned long flags;
643
644 IWL_DEBUG_POWER(priv, "Stop all queues\n");
645
646 if (priv->mac80211_registered)
647 ieee80211_stop_queues(priv->hw);
648
649 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
650 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
651 iwl_read32(priv, CSR_UCODE_DRV_GP1);
652
653 spin_lock_irqsave(&priv->reg_lock, flags);
654 if (!iwl_grab_nic_access(priv))
655 iwl_release_nic_access(priv);
656 spin_unlock_irqrestore(&priv->reg_lock, flags);
657}
658
659/* Handle notification from uCode that card's power state is changing
660 * due to software, hardware, or critical temperature RFKILL */
661static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
662 struct iwl_rx_mem_buffer *rxb)
663{
664 struct iwl_rx_packet *pkt = rxb_addr(rxb);
665 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
666 unsigned long status = priv->status;
667
668 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
669 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
670 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
671 (flags & CT_CARD_DISABLED) ?
672 "Reached" : "Not reached");
673
674 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
675 CT_CARD_DISABLED)) {
676
677 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
678 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
679
680 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
681 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
682
683 if (!(flags & RXON_CARD_DISABLED)) {
684 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
685 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
686 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
687 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
688 }
689 }
690
691 if (flags & CT_CARD_DISABLED)
692 iwl4965_perform_ct_kill_task(priv);
693
694 if (flags & HW_CARD_DISABLED)
695 set_bit(STATUS_RF_KILL_HW, &priv->status);
696 else
697 clear_bit(STATUS_RF_KILL_HW, &priv->status);
698
699 if (!(flags & RXON_CARD_DISABLED))
700 iwl_legacy_scan_cancel(priv);
701
702 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
703 test_bit(STATUS_RF_KILL_HW, &priv->status)))
704 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
705 test_bit(STATUS_RF_KILL_HW, &priv->status));
706 else
707 wake_up_interruptible(&priv->wait_command_queue);
708}
709
710/**
711 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
712 *
713 * Setup the RX handlers for each of the reply types sent from the uCode
714 * to the host.
715 *
716 * This function chains into the hardware specific files for them to setup
717 * any hardware specific handlers as well.
718 */
719static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
720{
721 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
722 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
723 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
724 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
725 iwl_legacy_rx_spectrum_measure_notif;
726 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
727 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
728 iwl_legacy_rx_pm_debug_statistics_notif;
729 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
730
731 /*
732	 * The same handler is used both for the REPLY to a discrete
733	 * statistics request from the host and for the periodic
734 * statistics notifications (after received beacons) from the uCode.
735 */
736 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
737 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
738
739 iwl_legacy_setup_rx_scan_handlers(priv);
740
741 /* status change handler */
742 priv->rx_handlers[CARD_STATE_NOTIFICATION] =
743 iwl4965_rx_card_state_notif;
744
745 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
746 iwl4965_rx_missed_beacon_notif;
747 /* Rx handlers */
748 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
749 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
750 /* block ack */
751 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
752 /* Set up hardware specific Rx handlers */
753 priv->cfg->ops->lib->rx_handler_setup(priv);
754}
755
756/**
757 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
758 *
759 * Uses the priv->rx_handlers callback function array to invoke
760 * the appropriate handlers, including command responses,
761 * frame-received notifications, and other notifications.
762 */
763void iwl4965_rx_handle(struct iwl_priv *priv)
764{
765 struct iwl_rx_mem_buffer *rxb;
766 struct iwl_rx_packet *pkt;
767 struct iwl_rx_queue *rxq = &priv->rxq;
768 u32 r, i;
769 int reclaim;
770 unsigned long flags;
771 u8 fill_rx = 0;
772 u32 count = 8;
773 int total_empty;
774
775 /* uCode's read index (stored in shared DRAM) indicates the last Rx
776 * buffer that the driver may process (last buffer filled by ucode). */
777 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
778 i = rxq->read;
779
780 /* Rx interrupt, but nothing sent from uCode */
781 if (i == r)
782 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
783
784	/* calculate how many frames need to be restocked after handling RX */
785 total_empty = r - rxq->write_actual;
786 if (total_empty < 0)
787 total_empty += RX_QUEUE_SIZE;
788
789 if (total_empty > (RX_QUEUE_SIZE / 2))
790 fill_rx = 1;
791
792 while (i != r) {
793 int len;
794
795 rxb = rxq->queue[i];
796
797 /* If an RXB doesn't have a Rx queue slot associated with it,
798 * then a bug has been introduced in the queue refilling
799 * routines -- catch it here */
800 BUG_ON(rxb == NULL);
801
802 rxq->queue[i] = NULL;
803
804 pci_unmap_page(priv->pci_dev, rxb->page_dma,
805 PAGE_SIZE << priv->hw_params.rx_page_order,
806 PCI_DMA_FROMDEVICE);
807 pkt = rxb_addr(rxb);
808
809 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
810 len += sizeof(u32); /* account for status word */
811 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
812
813 /* Reclaim a command buffer only if this packet is a response
814 * to a (driver-originated) command.
815 * If the packet (e.g. Rx frame) originated from uCode,
816 * there is no command buffer to reclaim.
817 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
818 * but apparently a few don't get set; catch them here. */
819 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
820 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
821 (pkt->hdr.cmd != REPLY_RX) &&
822 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
823 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
824 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
825 (pkt->hdr.cmd != REPLY_TX);
826
827 /* Based on type of command response or notification,
828 * handle those that need handling via function in
829 * rx_handlers table. See iwl4965_setup_rx_handlers() */
830 if (priv->rx_handlers[pkt->hdr.cmd]) {
831 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
832 i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
833 pkt->hdr.cmd);
834 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
835 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
836 } else {
837 /* No handling needed */
838 IWL_DEBUG_RX(priv,
839 "r %d i %d No handler needed for %s, 0x%02x\n",
840 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
841 pkt->hdr.cmd);
842 }
843
844 /*
845 * XXX: After here, we should always check rxb->page
846 * against NULL before touching it or its virtual
847 * memory (pkt). Because some rx_handler might have
848 * already taken or freed the pages.
849 */
850
851 if (reclaim) {
852 /* Invoke any callbacks, transfer the buffer to caller,
853 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
854 * as we reclaim the driver command queue */
855 if (rxb->page)
856 iwl_legacy_tx_cmd_complete(priv, rxb);
857 else
858 IWL_WARN(priv, "Claim null rxb?\n");
859 }
860
861 /* Reuse the page if possible. For notification packets and
862 * SKBs that fail to Rx correctly, add them back into the
863 * rx_free list for reuse later. */
864 spin_lock_irqsave(&rxq->lock, flags);
865 if (rxb->page != NULL) {
866 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
867 0, PAGE_SIZE << priv->hw_params.rx_page_order,
868 PCI_DMA_FROMDEVICE);
869 list_add_tail(&rxb->list, &rxq->rx_free);
870 rxq->free_count++;
871 } else
872 list_add_tail(&rxb->list, &rxq->rx_used);
873
874 spin_unlock_irqrestore(&rxq->lock, flags);
875
876 i = (i + 1) & RX_QUEUE_MASK;
877 /* If there are a lot of unused frames,
878		 * restock the Rx queue so the ucode won't assert. */
879 if (fill_rx) {
880 count++;
881 if (count >= 8) {
882 rxq->read = i;
883 iwl4965_rx_replenish_now(priv);
884 count = 0;
885 }
886 }
887 }
888
889 /* Backtrack one entry */
890 rxq->read = i;
891 if (fill_rx)
892 iwl4965_rx_replenish_now(priv);
893 else
894 iwl4965_rx_queue_restock(priv);
895}
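
The restock accounting in iwl4965_rx_handle() treats the read and write positions as indices on a ring, so the number of empty slots is their difference taken modulo the ring size, and aggressive refilling starts once more than half the ring is empty. A small sketch of that arithmetic (RING_SIZE is illustrative, not the driver's RX_QUEUE_SIZE):

#include <stdio.h>

#define RING_SIZE 256	/* power of two, so an index mask can be used */
#define RING_MASK (RING_SIZE - 1)

static int slots_to_restock(int read_pos, int write_actual)
{
	int empty = read_pos - write_actual;

	if (empty < 0)
		empty += RING_SIZE;	/* index wrapped around the ring */
	return empty;
}

int main(void)
{
	int empty = slots_to_restock(10, 200);	/* read index wrapped past 0 */

	printf("empty = %d, refill aggressively: %s\n",
	       empty, empty > RING_SIZE / 2 ? "yes" : "no");
	printf("next index after %d is %d\n", 255, (255 + 1) & RING_MASK);
	return 0;
}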
896
897/* call this function to flush any scheduled tasklet */
898static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
899{
900	/* wait to make sure we flush the pending tasklet */
901 synchronize_irq(priv->pci_dev->irq);
902 tasklet_kill(&priv->irq_tasklet);
903}
904
905static void iwl4965_irq_tasklet(struct iwl_priv *priv)
906{
907 u32 inta, handled = 0;
908 u32 inta_fh;
909 unsigned long flags;
910 u32 i;
911#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
912 u32 inta_mask;
913#endif
914
915 spin_lock_irqsave(&priv->lock, flags);
916
917 /* Ack/clear/reset pending uCode interrupts.
918 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
919 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
920 inta = iwl_read32(priv, CSR_INT);
921 iwl_write32(priv, CSR_INT, inta);
922
923 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
924 * Any new interrupts that happen after this, either while we're
925 * in this tasklet, or later, will show up in next ISR/tasklet. */
926 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
927 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
928
929#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
930 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
931 /* just for debug */
932 inta_mask = iwl_read32(priv, CSR_INT_MASK);
933 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
934 inta, inta_mask, inta_fh);
935 }
936#endif
937
938 spin_unlock_irqrestore(&priv->lock, flags);
939
940 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
941 * atomic, make sure that inta covers all the interrupts that
942 * we've discovered, even if FH interrupt came in just after
943 * reading CSR_INT. */
944 if (inta_fh & CSR49_FH_INT_RX_MASK)
945 inta |= CSR_INT_BIT_FH_RX;
946 if (inta_fh & CSR49_FH_INT_TX_MASK)
947 inta |= CSR_INT_BIT_FH_TX;
948
949 /* Now service all interrupt bits discovered above. */
950 if (inta & CSR_INT_BIT_HW_ERR) {
951 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
952
953 /* Tell the device to stop sending interrupts */
954 iwl_legacy_disable_interrupts(priv);
955
956 priv->isr_stats.hw++;
957 iwl_legacy_irq_handle_error(priv);
958
959 handled |= CSR_INT_BIT_HW_ERR;
960
961 return;
962 }
963
964#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
965 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
966 /* NIC fires this, but we don't use it, redundant with WAKEUP */
967 if (inta & CSR_INT_BIT_SCD) {
968			IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
969				"the frame(s).\n");
970 priv->isr_stats.sch++;
971 }
972
973 /* Alive notification via Rx interrupt will do the real work */
974 if (inta & CSR_INT_BIT_ALIVE) {
975 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
976 priv->isr_stats.alive++;
977 }
978 }
979#endif
980 /* Safely ignore these bits for debug checks below */
981 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
982
983 /* HW RF KILL switch toggled */
984 if (inta & CSR_INT_BIT_RF_KILL) {
985 int hw_rf_kill = 0;
986 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
987 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
988 hw_rf_kill = 1;
989
990 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
991 hw_rf_kill ? "disable radio" : "enable radio");
992
993 priv->isr_stats.rfkill++;
994
995		/* The driver only loads the ucode when setting the interface up.
996		 * The driver allows loading the ucode even if the radio
997		 * is killed, hence update the killswitch state here. The
998		 * rfkill handler will take care of restarting if needed.
999 */
1000 if (!test_bit(STATUS_ALIVE, &priv->status)) {
1001 if (hw_rf_kill)
1002 set_bit(STATUS_RF_KILL_HW, &priv->status);
1003 else
1004 clear_bit(STATUS_RF_KILL_HW, &priv->status);
1005 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
1006 }
1007
1008 handled |= CSR_INT_BIT_RF_KILL;
1009 }
1010
1011 /* Chip got too hot and stopped itself */
1012 if (inta & CSR_INT_BIT_CT_KILL) {
1013 IWL_ERR(priv, "Microcode CT kill error detected.\n");
1014 priv->isr_stats.ctkill++;
1015 handled |= CSR_INT_BIT_CT_KILL;
1016 }
1017
1018 /* Error detected by uCode */
1019 if (inta & CSR_INT_BIT_SW_ERR) {
1020 IWL_ERR(priv, "Microcode SW error detected. "
1021 " Restarting 0x%X.\n", inta);
1022 priv->isr_stats.sw++;
1023 iwl_legacy_irq_handle_error(priv);
1024 handled |= CSR_INT_BIT_SW_ERR;
1025 }
1026
1027 /*
1028 * uCode wakes up after power-down sleep.
1029 * Tell device about any new tx or host commands enqueued,
1030 * and about any Rx buffers made available while asleep.
1031 */
1032 if (inta & CSR_INT_BIT_WAKEUP) {
1033 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1034 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1035 for (i = 0; i < priv->hw_params.max_txq_num; i++)
1036 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
1037 priv->isr_stats.wakeup++;
1038 handled |= CSR_INT_BIT_WAKEUP;
1039 }
1040
1041 /* All uCode command responses, including Tx command responses,
1042 * Rx "responses" (frame-received notification), and other
1043	 * notifications from uCode come through here */
1044 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1045 iwl4965_rx_handle(priv);
1046 priv->isr_stats.rx++;
1047 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1048 }
1049
1050 /* This "Tx" DMA channel is used only for loading uCode */
1051 if (inta & CSR_INT_BIT_FH_TX) {
1052 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
1053 priv->isr_stats.tx++;
1054 handled |= CSR_INT_BIT_FH_TX;
1055 /* Wake up uCode load routine, now that load is complete */
1056 priv->ucode_write_complete = 1;
1057 wake_up_interruptible(&priv->wait_command_queue);
1058 }
1059
1060 if (inta & ~handled) {
1061 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1062 priv->isr_stats.unhandled++;
1063 }
1064
1065 if (inta & ~(priv->inta_mask)) {
1066 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1067 inta & ~priv->inta_mask);
1068 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
1069 }
1070
1071 /* Re-enable all interrupts */
1072	/* only re-enable if disabled by irq */
1073 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1074 iwl_legacy_enable_interrupts(priv);
1075
1076#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1077 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1078 inta = iwl_read32(priv, CSR_INT);
1079 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1080 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1081 IWL_DEBUG_ISR(priv,
1082 "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1083 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1084 }
1085#endif
1086}
1087
1088/*****************************************************************************
1089 *
1090 * sysfs attributes
1091 *
1092 *****************************************************************************/
1093
1094#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1095
1096/*
1097 * The following adds a new attribute to the sysfs representation
1098 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
1099 * used for controlling the debug level.
1100 *
1101 * See the level definitions in iwl for details.
1102 *
1103 * The debug_level managed via sysfs below is a per-device debug
1104 * level that is used instead of the global debug level whenever
1105 * it is set.
1106 */
1107static ssize_t iwl4965_show_debug_level(struct device *d,
1108 struct device_attribute *attr, char *buf)
1109{
1110 struct iwl_priv *priv = dev_get_drvdata(d);
1111 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
1112}
1113static ssize_t iwl4965_store_debug_level(struct device *d,
1114 struct device_attribute *attr,
1115 const char *buf, size_t count)
1116{
1117 struct iwl_priv *priv = dev_get_drvdata(d);
1118 unsigned long val;
1119 int ret;
1120
1121 ret = strict_strtoul(buf, 0, &val);
1122 if (ret)
1123 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
1124 else {
1125 priv->debug_level = val;
1126 if (iwl_legacy_alloc_traffic_mem(priv))
1127 IWL_ERR(priv,
1128 "Not enough memory to generate traffic log\n");
1129 }
1130 return strnlen(buf, count);
1131}
1132
1133static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
1134 iwl4965_show_debug_level, iwl4965_store_debug_level);
1135
1136
1137#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1138
1139
1140static ssize_t iwl4965_show_temperature(struct device *d,
1141 struct device_attribute *attr, char *buf)
1142{
1143 struct iwl_priv *priv = dev_get_drvdata(d);
1144
1145 if (!iwl_legacy_is_alive(priv))
1146 return -EAGAIN;
1147
1148 return sprintf(buf, "%d\n", priv->temperature);
1149}
1150
1151static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
1152
1153static ssize_t iwl4965_show_tx_power(struct device *d,
1154 struct device_attribute *attr, char *buf)
1155{
1156 struct iwl_priv *priv = dev_get_drvdata(d);
1157
1158 if (!iwl_legacy_is_ready_rf(priv))
1159 return sprintf(buf, "off\n");
1160 else
1161 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
1162}
1163
1164static ssize_t iwl4965_store_tx_power(struct device *d,
1165 struct device_attribute *attr,
1166 const char *buf, size_t count)
1167{
1168 struct iwl_priv *priv = dev_get_drvdata(d);
1169 unsigned long val;
1170 int ret;
1171
1172 ret = strict_strtoul(buf, 10, &val);
1173 if (ret)
1174 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
1175 else {
1176 ret = iwl_legacy_set_tx_power(priv, val, false);
1177 if (ret)
1178 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
1179 ret);
1180 else
1181 ret = count;
1182 }
1183 return ret;
1184}
1185
1186static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
1187 iwl4965_show_tx_power, iwl4965_store_tx_power);
1188
1189static struct attribute *iwl_sysfs_entries[] = {
1190 &dev_attr_temperature.attr,
1191 &dev_attr_tx_power.attr,
1192#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1193 &dev_attr_debug_level.attr,
1194#endif
1195 NULL
1196};
1197
1198static struct attribute_group iwl_attribute_group = {
1199 .name = NULL, /* put in device directory */
1200 .attrs = iwl_sysfs_entries,
1201};
1202
1203/******************************************************************************
1204 *
1205 * uCode download functions
1206 *
1207 ******************************************************************************/
1208
1209static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
1210{
1211 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1212 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1213 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1214 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1215 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1216 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1217}
1218
1219static void iwl4965_nic_start(struct iwl_priv *priv)
1220{
1221 /* Remove all resets to allow NIC to operate */
1222 iwl_write32(priv, CSR_RESET, 0);
1223}
1224
1225static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
1226 void *context);
1227static int iwl4965_mac_setup_register(struct iwl_priv *priv,
1228 u32 max_probe_length);
1229
1230static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
1231{
1232 const char *name_pre = priv->cfg->fw_name_pre;
1233 char tag[8];
1234
1235 if (first) {
1236 priv->fw_index = priv->cfg->ucode_api_max;
1237 sprintf(tag, "%d", priv->fw_index);
1238 } else {
1239 priv->fw_index--;
1240 sprintf(tag, "%d", priv->fw_index);
1241 }
1242
1243 if (priv->fw_index < priv->cfg->ucode_api_min) {
1244 IWL_ERR(priv, "no suitable firmware found!\n");
1245 return -ENOENT;
1246 }
1247
1248 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1249
1250 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
1251 priv->firmware_name);
1252
1253 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1254 &priv->pci_dev->dev, GFP_KERNEL, priv,
1255 iwl4965_ucode_callback);
1256}
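
iwl4965_request_firmware() and its callback together implement a fallback loop: start at the highest supported API index and count down until a firmware file is found or the minimum supported API is passed. A synchronous sketch of that countdown (try_load() and the name prefix are stand-ins; the driver itself uses request_firmware_nowait()):

#include <stdio.h>
#include <string.h>

static int try_load(const char *name)
{
	/* Pretend only the "-2" image exists on this system. */
	return strstr(name, "-2.ucode") != NULL;
}

static int load_best_firmware(const char *prefix, int api_max, int api_min,
			      char *out, size_t out_len)
{
	int api;

	for (api = api_max; api >= api_min; api--) {
		snprintf(out, out_len, "%s-%d.ucode", prefix, api);
		if (try_load(out))
			return 0;	/* found a usable image */
	}
	return -1;			/* no suitable firmware found */
}

int main(void)
{
	char name[64];

	if (!load_best_firmware("iwlwifi-4965", 2, 1, name, sizeof(name)))
		printf("loaded %s\n", name);
	return 0;
}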
1257
1258struct iwl4965_firmware_pieces {
1259 const void *inst, *data, *init, *init_data, *boot;
1260 size_t inst_size, data_size, init_size, init_data_size, boot_size;
1261};
1262
1263static int iwl4965_load_firmware(struct iwl_priv *priv,
1264 const struct firmware *ucode_raw,
1265 struct iwl4965_firmware_pieces *pieces)
1266{
1267 struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
1268 u32 api_ver, hdr_size;
1269 const u8 *src;
1270
1271 priv->ucode_ver = le32_to_cpu(ucode->ver);
1272 api_ver = IWL_UCODE_API(priv->ucode_ver);
1273
1274 switch (api_ver) {
1275 default:
1276 case 0:
1277 case 1:
1278 case 2:
1279 hdr_size = 24;
1280 if (ucode_raw->size < hdr_size) {
1281 IWL_ERR(priv, "File size too small!\n");
1282 return -EINVAL;
1283 }
1284 pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
1285 pieces->data_size = le32_to_cpu(ucode->v1.data_size);
1286 pieces->init_size = le32_to_cpu(ucode->v1.init_size);
1287 pieces->init_data_size =
1288 le32_to_cpu(ucode->v1.init_data_size);
1289 pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
1290 src = ucode->v1.data;
1291 break;
1292 }
1293
1294 /* Verify size of file vs. image size info in file's header */
1295 if (ucode_raw->size != hdr_size + pieces->inst_size +
1296 pieces->data_size + pieces->init_size +
1297 pieces->init_data_size + pieces->boot_size) {
1298
1299 IWL_ERR(priv,
1300 "uCode file size %d does not match expected size\n",
1301 (int)ucode_raw->size);
1302 return -EINVAL;
1303 }
1304
1305 pieces->inst = src;
1306 src += pieces->inst_size;
1307 pieces->data = src;
1308 src += pieces->data_size;
1309 pieces->init = src;
1310 src += pieces->init_size;
1311 pieces->init_data = src;
1312 src += pieces->init_data_size;
1313 pieces->boot = src;
1314 src += pieces->boot_size;
1315
1316 return 0;
1317}
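
For the v1/v2 header format handled above, the ucode file is a fixed-size header followed by the five images back-to-back, and the declared sizes must add up exactly to the file size before the images are sliced out by pointer arithmetic. A stand-alone sketch of that check and slicing (struct demo_hdr is a simplified stand-in, not the real struct iwl_ucode_header layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_hdr {			/* simplified, illustrative header */
	uint32_t inst_size, data_size, init_size, init_data_size, boot_size;
};

struct demo_pieces {
	const uint8_t *inst, *data, *init, *init_data, *boot;
};

static int demo_slice(const uint8_t *file, size_t file_len,
		      struct demo_pieces *p)
{
	struct demo_hdr hdr;
	const uint8_t *src;

	if (file_len < sizeof(hdr))
		return -1;
	memcpy(&hdr, file, sizeof(hdr));

	if (file_len != sizeof(hdr) + hdr.inst_size + hdr.data_size +
			hdr.init_size + hdr.init_data_size + hdr.boot_size)
		return -1;		/* sizes don't add up to file size */

	src = file + sizeof(hdr);	/* images follow the header in order */
	p->inst = src;			src += hdr.inst_size;
	p->data = src;			src += hdr.data_size;
	p->init = src;			src += hdr.init_size;
	p->init_data = src;		src += hdr.init_data_size;
	p->boot = src;
	return 0;
}

int main(void)
{
	uint8_t file[sizeof(struct demo_hdr) + 10] = { 0 };
	struct demo_hdr hdr = { 4, 3, 2, 1, 0 };
	struct demo_pieces p;

	memcpy(file, &hdr, sizeof(hdr));
	printf("slice ok: %d\n", demo_slice(file, sizeof(file), &p) == 0);
	return 0;
}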
1318
1319/**
1320 * iwl4965_ucode_callback - callback when firmware was loaded
1321 *
1322 * If loaded successfully, copies the firmware into buffers
1323 * for the card to fetch (via DMA).
1324 */
1325static void
1326iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
1327{
1328 struct iwl_priv *priv = context;
1329 struct iwl_ucode_header *ucode;
1330 int err;
1331 struct iwl4965_firmware_pieces pieces;
1332 const unsigned int api_max = priv->cfg->ucode_api_max;
1333 const unsigned int api_min = priv->cfg->ucode_api_min;
1334 u32 api_ver;
1335
1336 u32 max_probe_length = 200;
1337 u32 standard_phy_calibration_size =
1338 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1339
1340 memset(&pieces, 0, sizeof(pieces));
1341
1342 if (!ucode_raw) {
1343 if (priv->fw_index <= priv->cfg->ucode_api_max)
1344 IWL_ERR(priv,
1345 "request for firmware file '%s' failed.\n",
1346 priv->firmware_name);
1347 goto try_again;
1348 }
1349
1350 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1351 priv->firmware_name, ucode_raw->size);
1352
1353 /* Make sure that we got at least the API version number */
1354 if (ucode_raw->size < 4) {
1355 IWL_ERR(priv, "File size way too small!\n");
1356 goto try_again;
1357 }
1358
1359 /* Data from ucode file: header followed by uCode images */
1360 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1361
1362 err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
1363
1364 if (err)
1365 goto try_again;
1366
1367 api_ver = IWL_UCODE_API(priv->ucode_ver);
1368
1369 /*
1370	 * api_ver should match the API version that forms part of the
1371	 * firmware filename, but we don't check for that; from here on we
1372	 * rely only on the API version read from the firmware header.
1373 */
1374 if (api_ver < api_min || api_ver > api_max) {
1375 IWL_ERR(priv,
1376 "Driver unable to support your firmware API. "
1377 "Driver supports v%u, firmware is v%u.\n",
1378 api_max, api_ver);
1379 goto try_again;
1380 }
1381
1382 if (api_ver != api_max)
1383 IWL_ERR(priv,
1384 "Firmware has old API version. Expected v%u, "
1385 "got v%u. New firmware can be obtained "
1386 "from http://www.intellinuxwireless.org.\n",
1387 api_max, api_ver);
1388
1389 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1390 IWL_UCODE_MAJOR(priv->ucode_ver),
1391 IWL_UCODE_MINOR(priv->ucode_ver),
1392 IWL_UCODE_API(priv->ucode_ver),
1393 IWL_UCODE_SERIAL(priv->ucode_ver));
1394
1395 snprintf(priv->hw->wiphy->fw_version,
1396 sizeof(priv->hw->wiphy->fw_version),
1397 "%u.%u.%u.%u",
1398 IWL_UCODE_MAJOR(priv->ucode_ver),
1399 IWL_UCODE_MINOR(priv->ucode_ver),
1400 IWL_UCODE_API(priv->ucode_ver),
1401 IWL_UCODE_SERIAL(priv->ucode_ver));
1402
1403 /*
1404 * For any of the failures below (before allocating pci memory)
1405 * we will try to load a version with a smaller API -- maybe the
1406 * user just got a corrupted version of the latest API.
1407 */
1408
1409 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1410 priv->ucode_ver);
1411 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1412 pieces.inst_size);
1413 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1414 pieces.data_size);
1415 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1416 pieces.init_size);
1417 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1418 pieces.init_data_size);
1419 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
1420 pieces.boot_size);
1421
1422 /* Verify that uCode images will fit in card's SRAM */
1423 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1424 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1425 pieces.inst_size);
1426 goto try_again;
1427 }
1428
1429 if (pieces.data_size > priv->hw_params.max_data_size) {
1430 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1431 pieces.data_size);
1432 goto try_again;
1433 }
1434
1435 if (pieces.init_size > priv->hw_params.max_inst_size) {
1436 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1437 pieces.init_size);
1438 goto try_again;
1439 }
1440
1441 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1442 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1443 pieces.init_data_size);
1444 goto try_again;
1445 }
1446
1447 if (pieces.boot_size > priv->hw_params.max_bsm_size) {
1448 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
1449 pieces.boot_size);
1450 goto try_again;
1451 }
1452
1453 /* Allocate ucode buffers for card's bus-master loading ... */
1454
1455 /* Runtime instructions and 2 copies of data:
1456 * 1) unmodified from disk
1457 * 2) backup cache for save/restore during power-downs */
1458 priv->ucode_code.len = pieces.inst_size;
1459 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
1460
1461 priv->ucode_data.len = pieces.data_size;
1462 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
1463
1464 priv->ucode_data_backup.len = pieces.data_size;
1465 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1466
1467 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
1468 !priv->ucode_data_backup.v_addr)
1469 goto err_pci_alloc;
1470
1471 /* Initialization instructions and data */
1472 if (pieces.init_size && pieces.init_data_size) {
1473 priv->ucode_init.len = pieces.init_size;
1474 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
1475
1476 priv->ucode_init_data.len = pieces.init_data_size;
1477 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1478
1479 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
1480 goto err_pci_alloc;
1481 }
1482
1483 /* Bootstrap (instructions only, no data) */
1484 if (pieces.boot_size) {
1485 priv->ucode_boot.len = pieces.boot_size;
1486 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
1487
1488 if (!priv->ucode_boot.v_addr)
1489 goto err_pci_alloc;
1490 }
1491
1492 /* Now that we can no longer fail, copy information */
1493
1494 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1495
1496 /* Copy images into buffers for card's bus-master reads ... */
1497
1498 /* Runtime instructions (first block of data in file) */
1499 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
1500 pieces.inst_size);
1501 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
1502
1503 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1504 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1505
1506 /*
1507 * Runtime data
1508 * NOTE: Copy into backup buffer will be done in iwl_up()
1509 */
1510 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
1511 pieces.data_size);
1512 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
1513 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
1514
1515 /* Initialization instructions */
1516 if (pieces.init_size) {
1517 IWL_DEBUG_INFO(priv,
1518 "Copying (but not loading) init instr len %Zd\n",
1519 pieces.init_size);
1520 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
1521 }
1522
1523 /* Initialization data */
1524 if (pieces.init_data_size) {
1525 IWL_DEBUG_INFO(priv,
1526 "Copying (but not loading) init data len %Zd\n",
1527 pieces.init_data_size);
1528 memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
1529 pieces.init_data_size);
1530 }
1531
1532 /* Bootstrap instructions */
1533 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
1534 pieces.boot_size);
1535 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
1536
1537 /*
1538	 * figure out the offsets of the chain noise reset and gain commands
1539	 * based on the size of the standard phy calibration command table
1540 */
1541 priv->_4965.phy_calib_chain_noise_reset_cmd =
1542 standard_phy_calibration_size;
1543 priv->_4965.phy_calib_chain_noise_gain_cmd =
1544 standard_phy_calibration_size + 1;
1545
1546 /**************************************************
1547 * This is still part of probe() in a sense...
1548 *
1549 * 9. Setup and register with mac80211 and debugfs
1550 **************************************************/
1551 err = iwl4965_mac_setup_register(priv, max_probe_length);
1552 if (err)
1553 goto out_unbind;
1554
1555 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
1556 if (err)
1557 IWL_ERR(priv,
1558 "failed to create debugfs files. Ignoring error: %d\n", err);
1559
1560 err = sysfs_create_group(&priv->pci_dev->dev.kobj,
1561 &iwl_attribute_group);
1562 if (err) {
1563 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1564 goto out_unbind;
1565 }
1566
1567	/* We have our copies now, allow the OS to release its copies */
1568 release_firmware(ucode_raw);
1569 complete(&priv->_4965.firmware_loading_complete);
1570 return;
1571
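	/*
	 * Error paths: try_again asks iwl4965_request_firmware() for the
	 * next (lower-API) image and only unbinds if that request fails;
	 * err_pci_alloc frees the uCode DMA buffers, then out_unbind
	 * signals firmware_loading_complete, unbinds the driver from the
	 * PCI device and releases the firmware.
	 */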
1572 try_again:
1573 /* try next, if any */
1574 if (iwl4965_request_firmware(priv, false))
1575 goto out_unbind;
1576 release_firmware(ucode_raw);
1577 return;
1578
1579 err_pci_alloc:
1580 IWL_ERR(priv, "failed to allocate pci memory\n");
1581 iwl4965_dealloc_ucode_pci(priv);
1582 out_unbind:
1583 complete(&priv->_4965.firmware_loading_complete);
1584 device_release_driver(&priv->pci_dev->dev);
1585 release_firmware(ucode_raw);
1586}
1587
1588static const char * const desc_lookup_text[] = {
1589 "OK",
1590 "FAIL",
1591 "BAD_PARAM",
1592 "BAD_CHECKSUM",
1593 "NMI_INTERRUPT_WDG",
1594 "SYSASSERT",
1595 "FATAL_ERROR",
1596 "BAD_COMMAND",
1597 "HW_ERROR_TUNE_LOCK",
1598 "HW_ERROR_TEMPERATURE",
1599 "ILLEGAL_CHAN_FREQ",
1600 "VCC_NOT_STABLE",
1601 "FH_ERROR",
1602 "NMI_INTERRUPT_HOST",
1603 "NMI_INTERRUPT_ACTION_PT",
1604 "NMI_INTERRUPT_UNKNOWN",
1605 "UCODE_VERSION_MISMATCH",
1606 "HW_ERROR_ABS_LOCK",
1607 "HW_ERROR_CAL_LOCK_FAIL",
1608 "NMI_INTERRUPT_INST_ACTION_PT",
1609 "NMI_INTERRUPT_DATA_ACTION_PT",
1610 "NMI_TRM_HW_ER",
1611 "NMI_INTERRUPT_TRM",
1612	"NMI_INTERRUPT_BREAK_POINT",
1613 "DEBUG_0",
1614 "DEBUG_1",
1615 "DEBUG_2",
1616 "DEBUG_3",
1617};
1618
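/*
 * Fallback table for error codes that are not plain indexes into
 * desc_lookup_text; iwl4965_desc_lookup() scans it by number and the
 * terminating "ADVANCED_SYSASSERT" entry is returned when nothing
 * matches.
 */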
1619static struct { char *name; u8 num; } advanced_lookup[] = {
1620 { "NMI_INTERRUPT_WDG", 0x34 },
1621 { "SYSASSERT", 0x35 },
1622 { "UCODE_VERSION_MISMATCH", 0x37 },
1623 { "BAD_COMMAND", 0x38 },
1624 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1625 { "FATAL_ERROR", 0x3D },
1626 { "NMI_TRM_HW_ERR", 0x46 },
1627 { "NMI_INTERRUPT_TRM", 0x4C },
1628 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1629 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1630 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1631 { "NMI_INTERRUPT_HOST", 0x66 },
1632 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1633 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1634 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1635 { "ADVANCED_SYSASSERT", 0 },
1636};
1637
1638static const char *iwl4965_desc_lookup(u32 num)
1639{
1640 int i;
1641 int max = ARRAY_SIZE(desc_lookup_text);
1642
1643 if (num < max)
1644 return desc_lookup_text[num];
1645
1646 max = ARRAY_SIZE(advanced_lookup) - 1;
1647 for (i = 0; i < max; i++) {
1648 if (advanced_lookup[i].num == num)
1649 break;
1650 }
1651 return advanced_lookup[i].name;
1652}
1653
1654#define ERROR_START_OFFSET (1 * sizeof(u32))
1655#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1656
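/*
 * Dump the uCode error table: word 0 at 'base' is the entry count, and
 * the fields read below (desc, pc, the blink/ilink link registers,
 * data1/data2, source line, timestamp and last host command) sit at
 * fixed u32 offsets from 'base'.
 */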
1657void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
1658{
1659 u32 data2, line;
1660 u32 desc, time, count, base, data1;
1661 u32 blink1, blink2, ilink1, ilink2;
1662 u32 pc, hcmd;
1663
1664 if (priv->ucode_type == UCODE_INIT) {
1665 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1666 } else {
1667 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1668 }
1669
1670 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1671 IWL_ERR(priv,
1672 "Not valid error log pointer 0x%08X for %s uCode\n",
1673 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1674 return;
1675 }
1676
1677 count = iwl_legacy_read_targ_mem(priv, base);
1678
1679 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1680 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1681 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1682 priv->status, count);
1683 }
1684
1685 desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
1686 priv->isr_stats.err_code = desc;
1687 pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
1688 blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
1689 blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
1690 ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
1691 ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
1692 data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
1693 data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
1694 line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
1695 time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
1696 hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
1697
1698 trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
1699 time, data1, data2, line,
1700 blink1, blink2, ilink1, ilink2);
1701
1702 IWL_ERR(priv, "Desc Time "
1703 "data1 data2 line\n");
1704 IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
1705 iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
1706 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
1707 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
1708 pc, blink1, blink2, ilink1, ilink2, hcmd);
1709}
1710
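/*
 * The SRAM event log starts with a four-word header (capacity, mode,
 * num_wraps, next_entry) which EVENT_START_OFFSET skips; each entry is
 * two u32s, or three when 'mode' indicates a timestamp is recorded.
 */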
1711#define EVENT_START_OFFSET (4 * sizeof(u32))
1712
1713/**
1714 * iwl4965_print_event_log - Dump uCode event log entries to syslog or a buffer
1715 *
1716 */
1717static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
1718 u32 num_events, u32 mode,
1719 int pos, char **buf, size_t bufsz)
1720{
1721 u32 i;
1722 u32 base; /* SRAM byte address of event log header */
1723 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1724 u32 ptr; /* SRAM byte address of log data */
1725 u32 ev, time, data; /* event log data */
1726 unsigned long reg_flags;
1727
1728 if (num_events == 0)
1729 return pos;
1730
1731 if (priv->ucode_type == UCODE_INIT) {
1732 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1733 } else {
1734 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1735 }
1736
1737 if (mode == 0)
1738 event_size = 2 * sizeof(u32);
1739 else
1740 event_size = 3 * sizeof(u32);
1741
1742 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1743
1744 /* Make sure device is powered up for SRAM reads */
1745 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1746 iwl_grab_nic_access(priv);
1747
1748 /* Set starting address; reads will auto-increment */
1749 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1750 rmb();
1751
1752 /* "time" is actually "data" for mode 0 (no timestamp).
1753 * place event id # at far right for easier visual parsing. */
1754 for (i = 0; i < num_events; i++) {
1755 ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1756 time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1757 if (mode == 0) {
1758 /* data, ev */
1759 if (bufsz) {
1760 pos += scnprintf(*buf + pos, bufsz - pos,
1761 "EVT_LOG:0x%08x:%04u\n",
1762 time, ev);
1763 } else {
1764 trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
1765 time, ev);
1766 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1767 time, ev);
1768 }
1769 } else {
1770 data = _iwl_legacy_read_direct32(priv,
1771 HBUS_TARG_MEM_RDAT);
1772 if (bufsz) {
1773 pos += scnprintf(*buf + pos, bufsz - pos,
1774 "EVT_LOGT:%010u:0x%08x:%04u\n",
1775 time, data, ev);
1776 } else {
1777 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1778 time, data, ev);
1779 trace_iwlwifi_legacy_dev_ucode_event(priv, time,
1780 data, ev);
1781 }
1782 }
1783 }
1784
1785 /* Allow device to power down */
1786 iwl_release_nic_access(priv);
1787 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1788 return pos;
1789}
1790
1791/**
1792 * iwl4965_print_last_event_logs - Dump the newest event log entries to syslog
1793 */
1794static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1795 u32 num_wraps, u32 next_entry,
1796 u32 size, u32 mode,
1797 int pos, char **buf, size_t bufsz)
1798{
1799 /*
1800	 * display the newest 'size' entries,
1801	 * i.e. the entries just before the next one that uCode would fill.
1802 */
1803 if (num_wraps) {
1804 if (next_entry < size) {
1805 pos = iwl4965_print_event_log(priv,
1806 capacity - (size - next_entry),
1807 size - next_entry, mode,
1808 pos, buf, bufsz);
1809 pos = iwl4965_print_event_log(priv, 0,
1810 next_entry, mode,
1811 pos, buf, bufsz);
1812 } else
1813 pos = iwl4965_print_event_log(priv, next_entry - size,
1814 size, mode, pos, buf, bufsz);
1815 } else {
1816 if (next_entry < size) {
1817 pos = iwl4965_print_event_log(priv, 0, next_entry,
1818 mode, pos, buf, bufsz);
1819 } else {
1820 pos = iwl4965_print_event_log(priv, next_entry - size,
1821 size, mode, pos, buf, bufsz);
1822 }
1823 }
1824 return pos;
1825}
1826
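/*
 * Unless a full dump is requested (or the FW_ERRORS debug level is set)
 * only the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries are printed.
 * With debugging built in and 'display' set, the text is written to a
 * kmalloc'd buffer (roughly 48 bytes per entry) instead of the syslog.
 */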
1827#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1828
1829int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1830 char **buf, bool display)
1831{
1832 u32 base; /* SRAM byte address of event log header */
1833 u32 capacity; /* event log capacity in # entries */
1834 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1835 u32 num_wraps; /* # times uCode wrapped to top of log */
1836 u32 next_entry; /* index of next entry to be written by uCode */
1837 u32 size; /* # entries that we'll print */
1838 int pos = 0;
1839 size_t bufsz = 0;
1840
1841 if (priv->ucode_type == UCODE_INIT) {
1842 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1843 } else {
1844 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1845 }
1846
1847 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1848 IWL_ERR(priv,
1849 "Invalid event log pointer 0x%08X for %s uCode\n",
1850 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1851 return -EINVAL;
1852 }
1853
1854 /* event log header */
1855 capacity = iwl_legacy_read_targ_mem(priv, base);
1856 mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
1857 num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
1858 next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
1859
1860 size = num_wraps ? capacity : next_entry;
1861
1862 /* bail out if nothing in log */
1863 if (size == 0) {
1864 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1865 return pos;
1866 }
1867
1868#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1869 if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1870 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1871 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1872#else
1873 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1874 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1875#endif
1876 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1877 size);
1878
1879#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1880 if (display) {
1881 if (full_log)
1882 bufsz = capacity * 48;
1883 else
1884 bufsz = size * 48;
1885 *buf = kmalloc(bufsz, GFP_KERNEL);
1886 if (!*buf)
1887 return -ENOMEM;
1888 }
1889 if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1890 /*
1891 * if uCode has wrapped back to top of log,
1892 * start at the oldest entry,
1893	 * i.e. the next one that uCode would fill.
1894 */
1895 if (num_wraps)
1896 pos = iwl4965_print_event_log(priv, next_entry,
1897 capacity - next_entry, mode,
1898 pos, buf, bufsz);
1899 /* (then/else) start at top of log */
1900 pos = iwl4965_print_event_log(priv, 0,
1901 next_entry, mode, pos, buf, bufsz);
1902 } else
1903 pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
1904 next_entry, size, mode,
1905 pos, buf, bufsz);
1906#else
1907 pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
1908 next_entry, size, mode,
1909 pos, buf, bufsz);
1910#endif
1911 return pos;
1912}
1913
1914static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1915{
1916 struct iwl_ct_kill_config cmd;
1917 unsigned long flags;
1918 int ret = 0;
1919
1920 spin_lock_irqsave(&priv->lock, flags);
1921 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1922 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1923 spin_unlock_irqrestore(&priv->lock, flags);
1924
1925 cmd.critical_temperature_R =
1926 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1927
1928 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1929 sizeof(cmd), &cmd);
1930 if (ret)
1931 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1932 else
1933 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1934 "succeeded, "
1935 "critical temperature is %d\n",
1936 priv->hw_params.ct_kill_threshold);
1937}
1938
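/*
 * Default Tx queue to FIFO mapping: queues 0-3 carry the four EDCA
 * access categories (VO, VI, BE, BK), queue 4 is the host command
 * queue, and the remaining entries are unused.
 */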
1939static const s8 default_queue_to_tx_fifo[] = {
1940 IWL_TX_FIFO_VO,
1941 IWL_TX_FIFO_VI,
1942 IWL_TX_FIFO_BE,
1943 IWL_TX_FIFO_BK,
1944 IWL49_CMD_FIFO_NUM,
1945 IWL_TX_FIFO_UNUSED,
1946 IWL_TX_FIFO_UNUSED,
1947};
1948
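/*
 * Program the 4965 Tx scheduler once the runtime uCode reports alive:
 * clear the scheduler context area in SRAM, point it at the byte-count
 * tables, enable the Tx DMA channels and bind each Tx queue to its
 * FIFO according to default_queue_to_tx_fifo[].
 */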
1949static int iwl4965_alive_notify(struct iwl_priv *priv)
1950{
1951 u32 a;
1952 unsigned long flags;
1953 int i, chan;
1954 u32 reg_val;
1955
1956 spin_lock_irqsave(&priv->lock, flags);
1957
1958 /* Clear 4965's internal Tx Scheduler data base */
1959 priv->scd_base_addr = iwl_legacy_read_prph(priv,
1960 IWL49_SCD_SRAM_BASE_ADDR);
1961 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
1962 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1963 iwl_legacy_write_targ_mem(priv, a, 0);
1964 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
1965 iwl_legacy_write_targ_mem(priv, a, 0);
1966 for (; a < priv->scd_base_addr +
1967 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
1968 iwl_legacy_write_targ_mem(priv, a, 0);
1969
1970	/* Tell 4965 where to find Tx byte count tables */
1971 iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
1972 priv->scd_bc_tbls.dma >> 10);
1973
1974 /* Enable DMA channel */
1975 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
1976 iwl_legacy_write_direct32(priv,
1977 FH_TCSR_CHNL_TX_CONFIG_REG(chan),
1978 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1979 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1980
1981 /* Update FH chicken bits */
1982 reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
1983 iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
1984 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1985
1986 /* Disable chain mode for all queues */
1987 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
1988
1989 /* Initialize each Tx queue (including the command queue) */
1990 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
1991
1992 /* TFD circular buffer read/write indexes */
1993 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
1994 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1995
1996 /* Max Tx Window size for Scheduler-ACK mode */
1997 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
1998 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
1999 (SCD_WIN_SIZE <<
2000 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
2001 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
2002
2003 /* Frame limit */
2004 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
2005 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
2006 sizeof(u32),
2007 (SCD_FRAME_LIMIT <<
2008 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2009 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
2010
2011 }
2012 iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
2013 (1 << priv->hw_params.max_txq_num) - 1);
2014
2015 /* Activate all Tx DMA/FIFO channels */
2016 iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
2017
2018 iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
2019
2020	/* make sure all queues are not stopped */
2021 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
2022 for (i = 0; i < 4; i++)
2023 atomic_set(&priv->queue_stop_count[i], 0);
2024
2025	/* reset to 0 to enable all queues first */
2026 priv->txq_ctx_active_msk = 0;
2027 /* Map each Tx/cmd queue to its corresponding fifo */
2028 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
2029
2030 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
2031 int ac = default_queue_to_tx_fifo[i];
2032
2033 iwl_txq_ctx_activate(priv, i);
2034
2035 if (ac == IWL_TX_FIFO_UNUSED)
2036 continue;
2037
2038 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
2039 }
2040
2041 spin_unlock_irqrestore(&priv->lock, flags);
2042
2043 return 0;
2044}
2045
2046/**
2047 * iwl4965_alive_start - called after REPLY_ALIVE notification received
2048 * from protocol/runtime uCode (initialization uCode's
2049 * Alive gets handled by iwl_init_alive_start()).
2050 */
2051static void iwl4965_alive_start(struct iwl_priv *priv)
2052{
2053 int ret = 0;
2054 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2055
2056 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2057
2058 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2059 /* We had an error bringing up the hardware, so take it
2060 * all the way back down so we can try again */
2061 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2062 goto restart;
2063 }
2064
2065	/* The initialize uCode has loaded the runtime uCode ... verify inst image.
2066 * This is a paranoid check, because we would not have gotten the
2067 * "runtime" alive if code weren't properly loaded. */
2068 if (iwl4965_verify_ucode(priv)) {
2069 /* Runtime instruction load was bad;
2070 * take it all the way back down so we can try again */
2071 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2072 goto restart;
2073 }
2074
2075 ret = iwl4965_alive_notify(priv);
2076 if (ret) {
2077 IWL_WARN(priv,
2078 "Could not complete ALIVE transition [ntf]: %d\n", ret);
2079 goto restart;
2080 }
2081
2082
2083 /* After the ALIVE response, we can send host commands to the uCode */
2084 set_bit(STATUS_ALIVE, &priv->status);
2085
2086 /* Enable watchdog to monitor the driver tx queues */
2087 iwl_legacy_setup_watchdog(priv);
2088
2089 if (iwl_legacy_is_rfkill(priv))
2090 return;
2091
2092 ieee80211_wake_queues(priv->hw);
2093
2094 priv->active_rate = IWL_RATES_MASK;
2095
2096 if (iwl_legacy_is_associated_ctx(ctx)) {
2097 struct iwl_legacy_rxon_cmd *active_rxon =
2098 (struct iwl_legacy_rxon_cmd *)&ctx->active;
2099 /* apply any changes in staging */
2100 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2101 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2102 } else {
2103 struct iwl_rxon_context *tmp;
2104 /* Initialize our rx_config data */
2105 for_each_context(priv, tmp)
2106 iwl_legacy_connection_init_rx_config(priv, tmp);
2107
2108 if (priv->cfg->ops->hcmd->set_rxon_chain)
2109 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2110 }
2111
2112 /* Configure bluetooth coexistence if enabled */
2113 iwl_legacy_send_bt_config(priv);
2114
2115 iwl4965_reset_run_time_calib(priv);
2116
2117 set_bit(STATUS_READY, &priv->status);
2118
2119 /* Configure the adapter for unassociated operation */
2120 iwl_legacy_commit_rxon(priv, ctx);
2121
2122 /* At this point, the NIC is initialized and operational */
2123 iwl4965_rf_kill_ct_config(priv);
2124
2125 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2126 wake_up_interruptible(&priv->wait_command_queue);
2127
2128 iwl_legacy_power_update_mode(priv, true);
2129 IWL_DEBUG_INFO(priv, "Updated power mode\n");
2130
2131 return;
2132
2133 restart:
2134 queue_work(priv->workqueue, &priv->restart);
2135}
2136
2137static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
2138
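/*
 * Take the NIC all the way down: cancel scanning and the Tx watchdog,
 * drop uCode and driver station entries, disable interrupts, stop the
 * Tx/Rx queues and busmaster DMA clocks, and finally put the device to
 * sleep via iwl_legacy_apm_stop().  Only the "sticky" status bits
 * (RF kill, geo configured, fw error, exit pending) are preserved.
 */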
2139static void __iwl4965_down(struct iwl_priv *priv)
2140{
2141 unsigned long flags;
2142 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
2143
2144 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2145
2146 iwl_legacy_scan_cancel_timeout(priv, 200);
2147
2148 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2149
2150	/* Stop the TX queue watchdog. STATUS_EXIT_PENDING must be set
2151	 * to prevent the timer from being rearmed */
2152 del_timer_sync(&priv->watchdog);
2153
2154 iwl_legacy_clear_ucode_stations(priv, NULL);
2155 iwl_legacy_dealloc_bcast_stations(priv);
2156 iwl_legacy_clear_driver_stations(priv);
2157
2158 /* Unblock any waiting calls */
2159 wake_up_interruptible_all(&priv->wait_command_queue);
2160
2161 /* Wipe out the EXIT_PENDING status bit if we are not actually
2162 * exiting the module */
2163 if (!exit_pending)
2164 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2165
2166 /* stop and reset the on-board processor */
2167 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2168
2169 /* tell the device to stop sending interrupts */
2170 spin_lock_irqsave(&priv->lock, flags);
2171 iwl_legacy_disable_interrupts(priv);
2172 spin_unlock_irqrestore(&priv->lock, flags);
2173 iwl4965_synchronize_irq(priv);
2174
2175 if (priv->mac80211_registered)
2176 ieee80211_stop_queues(priv->hw);
2177
2178 /* If we have not previously called iwl_init() then
2179 * clear all bits but the RF Kill bit and return */
2180 if (!iwl_legacy_is_init(priv)) {
2181 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2182 STATUS_RF_KILL_HW |
2183 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2184 STATUS_GEO_CONFIGURED |
2185 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2186 STATUS_EXIT_PENDING;
2187 goto exit;
2188 }
2189
2190 /* ...otherwise clear out all the status bits but the RF Kill
2191 * bit and continue taking the NIC down. */
2192 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2193 STATUS_RF_KILL_HW |
2194 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2195 STATUS_GEO_CONFIGURED |
2196 test_bit(STATUS_FW_ERROR, &priv->status) <<
2197 STATUS_FW_ERROR |
2198 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2199 STATUS_EXIT_PENDING;
2200
2201 iwl4965_txq_ctx_stop(priv);
2202 iwl4965_rxq_stop(priv);
2203
2204 /* Power-down device's busmaster DMA clocks */
2205 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2206 udelay(5);
2207
2208 /* Make sure (redundant) we've released our request to stay awake */
2209 iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
2210 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2211
2212 /* Stop the device, and put it in low power state */
2213 iwl_legacy_apm_stop(priv);
2214
2215 exit:
2216 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2217
2218 dev_kfree_skb(priv->beacon_skb);
2219 priv->beacon_skb = NULL;
2220
2221 /* clear out any free frames */
2222 iwl4965_clear_free_frames(priv);
2223}
2224
2225static void iwl4965_down(struct iwl_priv *priv)
2226{
2227 mutex_lock(&priv->mutex);
2228 __iwl4965_down(priv);
2229 mutex_unlock(&priv->mutex);
2230
2231 iwl4965_cancel_deferred_work(priv);
2232}
2233
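/*
 * NIC-ready handshake: request NIC_READY in CSR_HW_IF_CONFIG_REG and
 * poll for up to HW_READY_TIMEOUT for the hardware to reflect it.  If
 * that times out, iwl4965_prepare_card_hw() below asserts the PREPARE
 * bit and then retries the handshake.
 */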
2234#define HW_READY_TIMEOUT (50)
2235
2236static int iwl4965_set_hw_ready(struct iwl_priv *priv)
2237{
2238 int ret = 0;
2239
2240 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2241 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2242
2243 /* See if we got it */
2244 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2245 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2246 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2247 HW_READY_TIMEOUT);
2248 if (ret != -ETIMEDOUT)
2249 priv->hw_ready = true;
2250 else
2251 priv->hw_ready = false;
2252
2253 IWL_DEBUG_INFO(priv, "hardware %s\n",
2254 (priv->hw_ready == 1) ? "ready" : "not ready");
2255 return ret;
2256}
2257
2258static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
2259{
2260 int ret = 0;
2261
2262 IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
2263
2264 ret = iwl4965_set_hw_ready(priv);
2265 if (priv->hw_ready)
2266 return ret;
2267
2268 /* If HW is not ready, prepare the conditions to check again */
2269 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2270 CSR_HW_IF_CONFIG_REG_PREPARE);
2271
2272 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2273 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
2274 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
2275
2276 /* HW should be ready by now, check again. */
2277 if (ret != -ETIMEDOUT)
2278 iwl4965_set_hw_ready(priv);
2279
2280 return ret;
2281}
2282
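/*
 * Bring the NIC up: allocate a broadcast station per context, check
 * hardware readiness and the RF kill switch, init the NIC and its
 * interrupts, refresh the uCode data backup cache, then try up to
 * MAX_HW_RESTARTS times to load the bootstrap uCode and start the card.
 */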
2283#define MAX_HW_RESTARTS 5
2284
2285static int __iwl4965_up(struct iwl_priv *priv)
2286{
2287 struct iwl_rxon_context *ctx;
2288 int i;
2289 int ret;
2290
2291 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2292 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2293 return -EIO;
2294 }
2295
2296 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2297 IWL_ERR(priv, "ucode not available for device bringup\n");
2298 return -EIO;
2299 }
2300
2301 for_each_context(priv, ctx) {
2302 ret = iwl4965_alloc_bcast_station(priv, ctx);
2303 if (ret) {
2304 iwl_legacy_dealloc_bcast_stations(priv);
2305 return ret;
2306 }
2307 }
2308
2309 iwl4965_prepare_card_hw(priv);
2310
2311 if (!priv->hw_ready) {
2312 IWL_WARN(priv, "Exit HW not ready\n");
2313 return -EIO;
2314 }
2315
2316 /* If platform's RF_KILL switch is NOT set to KILL */
2317 if (iwl_read32(priv,
2318 CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2319 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2320 else
2321 set_bit(STATUS_RF_KILL_HW, &priv->status);
2322
2323 if (iwl_legacy_is_rfkill(priv)) {
2324 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
2325
2326 iwl_legacy_enable_interrupts(priv);
2327 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2328 return 0;
2329 }
2330
2331 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2332
2333 /* must be initialised before iwl_hw_nic_init */
2334 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
2335
2336 ret = iwl4965_hw_nic_init(priv);
2337 if (ret) {
2338 IWL_ERR(priv, "Unable to init nic\n");
2339 return ret;
2340 }
2341
2342 /* make sure rfkill handshake bits are cleared */
2343 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2344 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2345 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2346
2347 /* clear (again), then enable host interrupts */
2348 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2349 iwl_legacy_enable_interrupts(priv);
2350
2351 /* really make sure rfkill handshake bits are cleared */
2352 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2353 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2354
2355 /* Copy original ucode data image from disk into backup cache.
2356 * This will be used to initialize the on-board processor's
2357 * data SRAM for a clean start when the runtime program first loads. */
2358 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2359 priv->ucode_data.len);
2360
2361 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2362
2363 /* load bootstrap state machine,
2364 * load bootstrap program into processor's memory,
2365 * prepare to load the "initialize" uCode */
2366 ret = priv->cfg->ops->lib->load_ucode(priv);
2367
2368 if (ret) {
2369 IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
2370 ret);
2371 continue;
2372 }
2373
2374 /* start card; "initialize" will load runtime ucode */
2375 iwl4965_nic_start(priv);
2376
2377 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2378
2379 return 0;
2380 }
2381
2382 set_bit(STATUS_EXIT_PENDING, &priv->status);
2383 __iwl4965_down(priv);
2384 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2385
2386 /* tried to restart and config the device for as long as our
2387 * patience could withstand */
2388 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2389 return -EIO;
2390}
2391
2392
2393/*****************************************************************************
2394 *
2395 * Workqueue callbacks
2396 *
2397 *****************************************************************************/
2398
2399static void iwl4965_bg_init_alive_start(struct work_struct *data)
2400{
2401 struct iwl_priv *priv =
2402 container_of(data, struct iwl_priv, init_alive_start.work);
2403
2404 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2405 return;
2406
2407 mutex_lock(&priv->mutex);
2408 priv->cfg->ops->lib->init_alive_start(priv);
2409 mutex_unlock(&priv->mutex);
2410}
2411
2412static void iwl4965_bg_alive_start(struct work_struct *data)
2413{
2414 struct iwl_priv *priv =
2415 container_of(data, struct iwl_priv, alive_start.work);
2416
2417 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2418 return;
2419
2420 mutex_lock(&priv->mutex);
2421 iwl4965_alive_start(priv);
2422 mutex_unlock(&priv->mutex);
2423}
2424
2425static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
2426{
2427 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2428 run_time_calib_work);
2429
2430 mutex_lock(&priv->mutex);
2431
2432 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2433 test_bit(STATUS_SCANNING, &priv->status)) {
2434 mutex_unlock(&priv->mutex);
2435 return;
2436 }
2437
2438 if (priv->start_calib) {
2439 iwl4965_chain_noise_calibration(priv,
2440 (void *)&priv->_4965.statistics);
2441 iwl4965_sensitivity_calibration(priv,
2442 (void *)&priv->_4965.statistics);
2443 }
2444
2445 mutex_unlock(&priv->mutex);
2446}
2447
2448static void iwl4965_bg_restart(struct work_struct *data)
2449{
2450 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2451
2452 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2453 return;
2454
2455 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2456 struct iwl_rxon_context *ctx;
2457
2458 mutex_lock(&priv->mutex);
2459 for_each_context(priv, ctx)
2460 ctx->vif = NULL;
2461 priv->is_open = 0;
2462
2463 __iwl4965_down(priv);
2464
2465 mutex_unlock(&priv->mutex);
2466 iwl4965_cancel_deferred_work(priv);
2467 ieee80211_restart_hw(priv->hw);
2468 } else {
2469 iwl4965_down(priv);
2470
2471 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2472 return;
2473
2474 mutex_lock(&priv->mutex);
2475 __iwl4965_up(priv);
2476 mutex_unlock(&priv->mutex);
2477 }
2478}
2479
2480static void iwl4965_bg_rx_replenish(struct work_struct *data)
2481{
2482 struct iwl_priv *priv =
2483 container_of(data, struct iwl_priv, rx_replenish);
2484
2485 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2486 return;
2487
2488 mutex_lock(&priv->mutex);
2489 iwl4965_rx_replenish(priv);
2490 mutex_unlock(&priv->mutex);
2491}
2492
2493/*****************************************************************************
2494 *
2495 * mac80211 entry point functions
2496 *
2497 *****************************************************************************/
2498
2499#define UCODE_READY_TIMEOUT (4 * HZ)
2500
2501/*
2502 * Not a mac80211 entry point function, but it fits in with all the
2503 * other mac80211 functions grouped here.
2504 */
2505static int iwl4965_mac_setup_register(struct iwl_priv *priv,
2506 u32 max_probe_length)
2507{
2508 int ret;
2509 struct ieee80211_hw *hw = priv->hw;
2510 struct iwl_rxon_context *ctx;
2511
2512 hw->rate_control_algorithm = "iwl-4965-rs";
2513
2514 /* Tell mac80211 our characteristics */
2515 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2516 IEEE80211_HW_AMPDU_AGGREGATION |
2517 IEEE80211_HW_NEED_DTIM_PERIOD |
2518 IEEE80211_HW_SPECTRUM_MGMT |
2519 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2520
2521 if (priv->cfg->sku & IWL_SKU_N)
2522 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2523 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2524
2525 hw->sta_data_size = sizeof(struct iwl_station_priv);
2526 hw->vif_data_size = sizeof(struct iwl_vif_priv);
2527
2528 for_each_context(priv, ctx) {
2529 hw->wiphy->interface_modes |= ctx->interface_modes;
2530 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
2531 }
2532
2533 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
2534 WIPHY_FLAG_DISABLE_BEACON_HINTS;
2535
2536 /*
2537 * For now, disable PS by default because it affects
2538 * RX performance significantly.
2539 */
2540 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2541
2542 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2543 /* we create the 802.11 header and a zero-length SSID element */
2544 hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
2545
2546 /* Default value; 4 EDCA QOS priorities */
2547 hw->queues = 4;
2548
2549 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2550
2551 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2552 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2553 &priv->bands[IEEE80211_BAND_2GHZ];
2554 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2555 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2556 &priv->bands[IEEE80211_BAND_5GHZ];
2557
2558 iwl_legacy_leds_init(priv);
2559
2560 ret = ieee80211_register_hw(priv->hw);
2561 if (ret) {
2562 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2563 return ret;
2564 }
2565 priv->mac80211_registered = 1;
2566
2567 return 0;
2568}
2569
2570
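/*
 * mac80211 start callback: bring the hardware up, then wait up to
 * UCODE_READY_TIMEOUT for iwl4965_alive_start() to set STATUS_READY
 * before declaring the interface open.
 */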
2571int iwl4965_mac_start(struct ieee80211_hw *hw)
2572{
2573 struct iwl_priv *priv = hw->priv;
2574 int ret;
2575
2576 IWL_DEBUG_MAC80211(priv, "enter\n");
2577
2578 /* we should be verifying the device is ready to be opened */
2579 mutex_lock(&priv->mutex);
2580 ret = __iwl4965_up(priv);
2581 mutex_unlock(&priv->mutex);
2582
2583 if (ret)
2584 return ret;
2585
2586 if (iwl_legacy_is_rfkill(priv))
2587 goto out;
2588
2589 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2590
2591 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
2592 * mac80211 will not be run successfully. */
2593 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
2594 test_bit(STATUS_READY, &priv->status),
2595 UCODE_READY_TIMEOUT);
2596 if (!ret) {
2597 if (!test_bit(STATUS_READY, &priv->status)) {
2598 IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
2599 jiffies_to_msecs(UCODE_READY_TIMEOUT));
2600 return -ETIMEDOUT;
2601 }
2602 }
2603
2604 iwl4965_led_enable(priv);
2605
2606out:
2607 priv->is_open = 1;
2608 IWL_DEBUG_MAC80211(priv, "leave\n");
2609 return 0;
2610}
2611
2612void iwl4965_mac_stop(struct ieee80211_hw *hw)
2613{
2614 struct iwl_priv *priv = hw->priv;
2615
2616 IWL_DEBUG_MAC80211(priv, "enter\n");
2617
2618 if (!priv->is_open)
2619 return;
2620
2621 priv->is_open = 0;
2622
2623 iwl4965_down(priv);
2624
2625 flush_workqueue(priv->workqueue);
2626
2627 /* enable interrupts again in order to receive rfkill changes */
2628 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2629 iwl_legacy_enable_interrupts(priv);
2630
2631 IWL_DEBUG_MAC80211(priv, "leave\n");
2632}
2633
2634void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2635{
2636 struct iwl_priv *priv = hw->priv;
2637
2638 IWL_DEBUG_MACDUMP(priv, "enter\n");
2639
2640 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2641 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2642
2643 if (iwl4965_tx_skb(priv, skb))
2644 dev_kfree_skb_any(skb);
2645
2646 IWL_DEBUG_MACDUMP(priv, "leave\n");
2647}
2648
2649void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
2650 struct ieee80211_vif *vif,
2651 struct ieee80211_key_conf *keyconf,
2652 struct ieee80211_sta *sta,
2653 u32 iv32, u16 *phase1key)
2654{
2655 struct iwl_priv *priv = hw->priv;
2656 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2657
2658 IWL_DEBUG_MAC80211(priv, "enter\n");
2659
2660 iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
2661 iv32, phase1key);
2662
2663 IWL_DEBUG_MAC80211(priv, "leave\n");
2664}
2665
2666int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2667 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2668 struct ieee80211_key_conf *key)
2669{
2670 struct iwl_priv *priv = hw->priv;
2671 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2672 struct iwl_rxon_context *ctx = vif_priv->ctx;
2673 int ret;
2674 u8 sta_id;
2675 bool is_default_wep_key = false;
2676
2677 IWL_DEBUG_MAC80211(priv, "enter\n");
2678
2679 if (priv->cfg->mod_params->sw_crypto) {
2680 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2681 return -EOPNOTSUPP;
2682 }
2683
2684 sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
2685 if (sta_id == IWL_INVALID_STATION)
2686 return -EINVAL;
2687
2688 mutex_lock(&priv->mutex);
2689 iwl_legacy_scan_cancel_timeout(priv, 100);
2690
2691 /*
2692 * If we are getting WEP group key and we didn't receive any key mapping
2693 * so far, we are in legacy wep mode (group key only), otherwise we are
2694 * in 1X mode.
2695 * In legacy wep mode, we use another host command to the uCode.
2696 */
2697 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2698 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2699 !sta) {
2700 if (cmd == SET_KEY)
2701 is_default_wep_key = !ctx->key_mapping_keys;
2702 else
2703 is_default_wep_key =
2704 (key->hw_key_idx == HW_KEY_DEFAULT);
2705 }
2706
2707 switch (cmd) {
2708 case SET_KEY:
2709 if (is_default_wep_key)
2710 ret = iwl4965_set_default_wep_key(priv,
2711 vif_priv->ctx, key);
2712 else
2713 ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
2714 key, sta_id);
2715
2716 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2717 break;
2718 case DISABLE_KEY:
2719 if (is_default_wep_key)
2720 ret = iwl4965_remove_default_wep_key(priv, ctx, key);
2721 else
2722 ret = iwl4965_remove_dynamic_key(priv, ctx,
2723 key, sta_id);
2724
2725 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2726 break;
2727 default:
2728 ret = -EINVAL;
2729 }
2730
2731 mutex_unlock(&priv->mutex);
2732 IWL_DEBUG_MAC80211(priv, "leave\n");
2733
2734 return ret;
2735}
2736
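/*
 * A-MPDU glue for mac80211: RX aggregation start/stop map directly onto
 * the 4965 station helpers, while TX aggregation start/stop also keep
 * agg_tids_count in sync.  Requires an 802.11n (IWL_SKU_N) SKU.
 */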
2737int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
2738 struct ieee80211_vif *vif,
2739 enum ieee80211_ampdu_mlme_action action,
2740 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2741 u8 buf_size)
2742{
2743 struct iwl_priv *priv = hw->priv;
2744 int ret = -EINVAL;
2745
2746 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2747 sta->addr, tid);
2748
2749 if (!(priv->cfg->sku & IWL_SKU_N))
2750 return -EACCES;
2751
2752 mutex_lock(&priv->mutex);
2753
2754 switch (action) {
2755 case IEEE80211_AMPDU_RX_START:
2756 IWL_DEBUG_HT(priv, "start Rx\n");
2757 ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
2758 break;
2759 case IEEE80211_AMPDU_RX_STOP:
2760 IWL_DEBUG_HT(priv, "stop Rx\n");
2761 ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
2762 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2763 ret = 0;
2764 break;
2765 case IEEE80211_AMPDU_TX_START:
2766 IWL_DEBUG_HT(priv, "start Tx\n");
2767 ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
2768 if (ret == 0) {
2769 priv->_4965.agg_tids_count++;
2770 IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
2771 priv->_4965.agg_tids_count);
2772 }
2773 break;
2774 case IEEE80211_AMPDU_TX_STOP:
2775 IWL_DEBUG_HT(priv, "stop Tx\n");
2776 ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
2777 if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
2778 priv->_4965.agg_tids_count--;
2779 IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
2780 priv->_4965.agg_tids_count);
2781 }
2782 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2783 ret = 0;
2784 break;
2785 case IEEE80211_AMPDU_TX_OPERATIONAL:
2786 ret = 0;
2787 break;
2788 }
2789 mutex_unlock(&priv->mutex);
2790
2791 return ret;
2792}
2793
2794int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
2795 struct ieee80211_vif *vif,
2796 struct ieee80211_sta *sta)
2797{
2798 struct iwl_priv *priv = hw->priv;
2799 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2800 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
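	/* on a station interface the peer being added is the AP, hence "is_ap" */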
2801 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
2802 int ret;
2803 u8 sta_id;
2804
2805 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
2806 sta->addr);
2807 mutex_lock(&priv->mutex);
2808 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
2809 sta->addr);
2810 sta_priv->common.sta_id = IWL_INVALID_STATION;
2811
2812 atomic_set(&sta_priv->pending_frames, 0);
2813
2814 ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
2815 is_ap, sta, &sta_id);
2816 if (ret) {
2817 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
2818 sta->addr, ret);
2819 /* Should we return success if return code is EEXIST ? */
2820 mutex_unlock(&priv->mutex);
2821 return ret;
2822 }
2823
2824 sta_priv->common.sta_id = sta_id;
2825
2826 /* Initialize rate scaling */
2827 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
2828 sta->addr);
2829 iwl4965_rs_rate_init(priv, sta, sta_id);
2830 mutex_unlock(&priv->mutex);
2831
2832 return 0;
2833}
2834
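/*
 * Handle a channel switch announcement: bail out under RF kill, while
 * scanning, when not associated or when a switch is already pending;
 * otherwise rebuild the staging RXON (channel, HT40 extension offset)
 * and hand it to lib->set_channel_switch().  If nothing was scheduled,
 * ieee80211_chswitch_done() is reported back from the out_exit path.
 */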
2835void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
2836 struct ieee80211_channel_switch *ch_switch)
2837{
2838 struct iwl_priv *priv = hw->priv;
2839 const struct iwl_channel_info *ch_info;
2840 struct ieee80211_conf *conf = &hw->conf;
2841 struct ieee80211_channel *channel = ch_switch->channel;
2842 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2843
2844 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2845 u16 ch;
2846 unsigned long flags = 0;
2847
2848 IWL_DEBUG_MAC80211(priv, "enter\n");
2849
2850 if (iwl_legacy_is_rfkill(priv))
2851 goto out_exit;
2852
2853 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2854 test_bit(STATUS_SCANNING, &priv->status))
2855 goto out_exit;
2856
2857 if (!iwl_legacy_is_associated_ctx(ctx))
2858 goto out_exit;
2859
2860 /* channel switch in progress */
2861 if (priv->switch_rxon.switch_in_progress == true)
2862 goto out_exit;
2863
2864 mutex_lock(&priv->mutex);
2865 if (priv->cfg->ops->lib->set_channel_switch) {
2866
2867 ch = channel->hw_value;
2868 if (le16_to_cpu(ctx->active.channel) != ch) {
2869 ch_info = iwl_legacy_get_channel_info(priv,
2870 channel->band,
2871 ch);
2872 if (!iwl_legacy_is_channel_valid(ch_info)) {
2873 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
2874 goto out;
2875 }
2876 spin_lock_irqsave(&priv->lock, flags);
2877
2878 priv->current_ht_config.smps = conf->smps_mode;
2879
2880 /* Configure HT40 channels */
2881 ctx->ht.enabled = conf_is_ht(conf);
2882 if (ctx->ht.enabled) {
2883 if (conf_is_ht40_minus(conf)) {
2884 ctx->ht.extension_chan_offset =
2885 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2886 ctx->ht.is_40mhz = true;
2887 } else if (conf_is_ht40_plus(conf)) {
2888 ctx->ht.extension_chan_offset =
2889 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2890 ctx->ht.is_40mhz = true;
2891 } else {
2892 ctx->ht.extension_chan_offset =
2893 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2894 ctx->ht.is_40mhz = false;
2895 }
2896 } else
2897 ctx->ht.is_40mhz = false;
2898
2899 if ((le16_to_cpu(ctx->staging.channel) != ch))
2900 ctx->staging.flags = 0;
2901
2902 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2903 iwl_legacy_set_rxon_ht(priv, ht_conf);
2904 iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
2905 ctx->vif);
2906 spin_unlock_irqrestore(&priv->lock, flags);
2907
2908 iwl_legacy_set_rate(priv);
2909 /*
2910 * at this point, staging_rxon has the
2911 * configuration for channel switch
2912 */
2913 if (priv->cfg->ops->lib->set_channel_switch(priv,
2914 ch_switch))
2915 priv->switch_rxon.switch_in_progress = false;
2916 }
2917 }
2918out:
2919 mutex_unlock(&priv->mutex);
2920out_exit:
2921 if (!priv->switch_rxon.switch_in_progress)
2922 ieee80211_chswitch_done(ctx->vif, false);
2923 IWL_DEBUG_MAC80211(priv, "leave\n");
2924}
2925
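/*
 * Translate mac80211 filter flags into RXON filter bits: the CHK()
 * macro below builds an OR mask for flags being set and a NAND mask
 * for flags being cleared, and both are applied to the staging RXON of
 * every context; the change is only committed later, with the next
 * RXON commit.
 */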
2926void iwl4965_configure_filter(struct ieee80211_hw *hw,
2927 unsigned int changed_flags,
2928 unsigned int *total_flags,
2929 u64 multicast)
2930{
2931 struct iwl_priv *priv = hw->priv;
2932 __le32 filter_or = 0, filter_nand = 0;
2933 struct iwl_rxon_context *ctx;
2934
2935#define CHK(test, flag) do { \
2936 if (*total_flags & (test)) \
2937 filter_or |= (flag); \
2938 else \
2939 filter_nand |= (flag); \
2940 } while (0)
2941
2942 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
2943 changed_flags, *total_flags);
2944
2945 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
2946 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
2947 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
2948 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
2949
2950#undef CHK
2951
2952 mutex_lock(&priv->mutex);
2953
2954 for_each_context(priv, ctx) {
2955 ctx->staging.filter_flags &= ~filter_nand;
2956 ctx->staging.filter_flags |= filter_or;
2957
2958 /*
2959 * Not committing directly because hardware can perform a scan,
2960 * but we'll eventually commit the filter flags change anyway.
2961 */
2962 }
2963
2964 mutex_unlock(&priv->mutex);
2965
2966 /*
2967 * Receiving all multicast frames is always enabled by the
2968 * default flags setup in iwl_legacy_connection_init_rx_config()
2969 * since we currently do not support programming multicast
2970 * filters into the device.
2971 */
2972 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
2973 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
2974}
2975
2976/*****************************************************************************
2977 *
2978 * driver setup and teardown
2979 *
2980 *****************************************************************************/
2981
2982static void iwl4965_bg_txpower_work(struct work_struct *work)
2983{
2984 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2985 txpower_work);
2986
2987 /* If a scan happened to start before we got here
2988 * then just return; the statistics notification will
2989 * kick off another scheduled work to compensate for
2990 * any temperature delta we missed here. */
2991 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2992 test_bit(STATUS_SCANNING, &priv->status))
2993 return;
2994
2995 mutex_lock(&priv->mutex);
2996
2997	/* Regardless of whether we are associated, we must reconfigure the
2998 * TX power since frames can be sent on non-radar channels while
2999 * not associated */
3000 priv->cfg->ops->lib->send_tx_power(priv);
3001
3002 /* Update last_temperature to keep is_calib_needed from running
3003 * when it isn't needed... */
3004 priv->last_temperature = priv->temperature;
3005
3006 mutex_unlock(&priv->mutex);
3007}
3008
3009static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
3010{
3011 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3012
3013 init_waitqueue_head(&priv->wait_command_queue);
3014
3015 INIT_WORK(&priv->restart, iwl4965_bg_restart);
3016 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
3017 INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
3018 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
3019 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
3020
3021 iwl_legacy_setup_scan_deferred_work(priv);
3022
3023 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
3024
3025 init_timer(&priv->statistics_periodic);
3026 priv->statistics_periodic.data = (unsigned long)priv;
3027 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
3028
3029 init_timer(&priv->ucode_trace);
3030 priv->ucode_trace.data = (unsigned long)priv;
3031 priv->ucode_trace.function = iwl4965_bg_ucode_trace;
3032
3033 init_timer(&priv->watchdog);
3034 priv->watchdog.data = (unsigned long)priv;
3035 priv->watchdog.function = iwl_legacy_bg_watchdog;
3036
3037 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3038 iwl4965_irq_tasklet, (unsigned long)priv);
3039}
3040
3041static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
3042{
3043 cancel_work_sync(&priv->txpower_work);
3044 cancel_delayed_work_sync(&priv->init_alive_start);
3045 cancel_delayed_work(&priv->alive_start);
3046 cancel_work_sync(&priv->run_time_calib_work);
3047
3048 iwl_legacy_cancel_scan_deferred_work(priv);
3049
3050 del_timer_sync(&priv->statistics_periodic);
3051 del_timer_sync(&priv->ucode_trace);
3052}
3053
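/*
 * Build the legacy rate table handed to mac80211: iwlegacy_rates[].ieee
 * is the IEEE rate value in 500 kb/s units while mac80211 expects the
 * bitrate in 100 kb/s units, hence the multiplication by 5.  CCK rates
 * other than 1 Mb/s also advertise short-preamble support.
 */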
3054static void iwl4965_init_hw_rates(struct iwl_priv *priv,
3055 struct ieee80211_rate *rates)
3056{
3057 int i;
3058
3059 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
3060 rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
3061 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3062 rates[i].hw_value_short = i;
3063 rates[i].flags = 0;
3064 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
3065 /*
3066 * If CCK != 1M then set short preamble rate flag.
3067 */
3068 rates[i].flags |=
3069 (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
3070 0 : IEEE80211_RATE_SHORT_PREAMBLE;
3071 }
3072 }
3073}
3074/*
3075 * Acquire priv->lock before calling this function !
3076 */
3077void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
3078{
3079 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
3080 (index & 0xff) | (txq_id << 8));
3081 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
3082}
3083
3084void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
3085 struct iwl_tx_queue *txq,
3086 int tx_fifo_id, int scd_retry)
3087{
3088 int txq_id = txq->q.id;
3089
3090 /* Find out whether to activate Tx queue */
3091 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
3092
3093 /* Set up and activate */
3094 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
3095 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
3096 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
3097 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
3098 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
3099 IWL49_SCD_QUEUE_STTS_REG_MSK);
3100
3101 txq->sched_retry = scd_retry;
3102
3103 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
3104 active ? "Activate" : "Deactivate",
3105 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
3106}
3107
3108
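/*
 * One-time driver state setup: locks, default band/interface mode and
 * SMPS setting, forced-reset timeouts, the minimum TX power limit, the
 * regulatory channel map, the per-band geo structures and the legacy
 * rate table.
 */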
3109static int iwl4965_init_drv(struct iwl_priv *priv)
3110{
3111 int ret;
3112
3113 spin_lock_init(&priv->sta_lock);
3114 spin_lock_init(&priv->hcmd_lock);
3115
3116 INIT_LIST_HEAD(&priv->free_frames);
3117
3118 mutex_init(&priv->mutex);
3119 mutex_init(&priv->sync_cmd_mutex);
3120
3121 priv->ieee_channels = NULL;
3122 priv->ieee_rates = NULL;
3123 priv->band = IEEE80211_BAND_2GHZ;
3124
3125 priv->iw_mode = NL80211_IFTYPE_STATION;
3126 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3127 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3128 priv->_4965.agg_tids_count = 0;
3129
3130 /* initialize force reset */
3131 priv->force_reset[IWL_RF_RESET].reset_duration =
3132 IWL_DELAY_NEXT_FORCE_RF_RESET;
3133 priv->force_reset[IWL_FW_RESET].reset_duration =
3134 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3135
3136 /* Choose which receivers/antennas to use */
3137 if (priv->cfg->ops->hcmd->set_rxon_chain)
3138 priv->cfg->ops->hcmd->set_rxon_chain(priv,
3139 &priv->contexts[IWL_RXON_CTX_BSS]);
3140
3141 iwl_legacy_init_scan_params(priv);
3142
3143	/* Set tx_power_user_lmt to the lowest power level;
3144	 * this value will get overwritten by the per-channel
3145	 * max power average from EEPROM */
3146 priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
3147 priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
3148
3149 ret = iwl_legacy_init_channel_map(priv);
3150 if (ret) {
3151 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3152 goto err;
3153 }
3154
3155 ret = iwl_legacy_init_geos(priv);
3156 if (ret) {
3157 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3158 goto err_free_channel_map;
3159 }
3160 iwl4965_init_hw_rates(priv, priv->ieee_rates);
3161
3162 return 0;
3163
3164err_free_channel_map:
3165 iwl_legacy_free_channel_map(priv);
3166err:
3167 return ret;
3168}
3169
3170static void iwl4965_uninit_drv(struct iwl_priv *priv)
3171{
3172 iwl4965_calib_free_results(priv);
3173 iwl_legacy_free_geos(priv);
3174 iwl_legacy_free_channel_map(priv);
3175 kfree(priv->scan_cmd);
3176}
3177
3178static void iwl4965_hw_detect(struct iwl_priv *priv)
3179{
3180 priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
3181 priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
3182 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
3183 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
3184}
3185
3186static int iwl4965_set_hw_params(struct iwl_priv *priv)
3187{
3188 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
3189 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
3190 if (priv->cfg->mod_params->amsdu_size_8K)
3191 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
3192 else
3193 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
3194
3195 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
3196
3197 if (priv->cfg->mod_params->disable_11n)
3198 priv->cfg->sku &= ~IWL_SKU_N;
3199
3200 /* Device-specific setup */
3201 return priv->cfg->ops->lib->set_hw_params(priv);
3202}
3203
3204static const u8 iwl4965_bss_ac_to_fifo[] = {
3205 IWL_TX_FIFO_VO,
3206 IWL_TX_FIFO_VI,
3207 IWL_TX_FIFO_BE,
3208 IWL_TX_FIFO_BK,
3209};
3210
3211static const u8 iwl4965_bss_ac_to_queue[] = {
3212 0, 1, 2, 3,
3213};
3214
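/*
 * The two tables above map the mac80211 access categories (VO, VI, BE,
 * BK) onto the 4965 Tx FIFOs and onto Tx queues 0-3; iwl4965_pci_probe()
 * wires them into the BSS RXON context it sets up below.
 */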
3215static int
3216iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3217{
3218 int err = 0, i;
3219 struct iwl_priv *priv;
3220 struct ieee80211_hw *hw;
3221 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3222 unsigned long flags;
3223 u16 pci_cmd;
3224
3225 /************************
3226 * 1. Allocating HW data
3227 ************************/
3228
3229 hw = iwl_legacy_alloc_all(cfg);
3230 if (!hw) {
3231 err = -ENOMEM;
3232 goto out;
3233 }
3234 priv = hw->priv;
3235 /* At this point both hw and priv are allocated. */
3236
3237 /*
3238 * The default context is always valid,
3239 * more may be discovered when firmware
3240 * is loaded.
3241 */
3242 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3243
3244 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3245 priv->contexts[i].ctxid = i;
3246
3247 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
3248 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
3249 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3250 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3251 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3252 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3253 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3254 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3255 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
3256 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
3257 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
3258 BIT(NL80211_IFTYPE_ADHOC);
3259 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3260 BIT(NL80211_IFTYPE_STATION);
3261 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
3262 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3263 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3264 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3265
3266 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
3267
3268 SET_IEEE80211_DEV(hw, &pdev->dev);
3269
3270 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3271 priv->cfg = cfg;
3272 priv->pci_dev = pdev;
3273 priv->inta_mask = CSR_INI_SET_MASK;
3274
3275 if (iwl_legacy_alloc_traffic_mem(priv))
3276 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3277
3278 /**************************
3279 * 2. Initializing PCI bus
3280 **************************/
3281 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3282 PCIE_LINK_STATE_CLKPM);
3283
3284 if (pci_enable_device(pdev)) {
3285 err = -ENODEV;
3286 goto out_ieee80211_free_hw;
3287 }
3288
3289 pci_set_master(pdev);
3290
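	/*
	 * Try 36-bit DMA addressing first; if the platform cannot provide
	 * it, fall back to a 32-bit mask before giving up.
	 */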
3291 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
3292 if (!err)
3293 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
3294 if (err) {
3295 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3296 if (!err)
3297 err = pci_set_consistent_dma_mask(pdev,
3298 DMA_BIT_MASK(32));
3299 /* both attempts failed: */
3300 if (err) {
3301 IWL_WARN(priv, "No suitable DMA available.\n");
3302 goto out_pci_disable_device;
3303 }
3304 }
3305
3306 err = pci_request_regions(pdev, DRV_NAME);
3307 if (err)
3308 goto out_pci_disable_device;
3309
3310 pci_set_drvdata(pdev, priv);
3311
3312
3313 /***********************
3314 * 3. Read REV register
3315 ***********************/
3316 priv->hw_base = pci_iomap(pdev, 0, 0);
3317 if (!priv->hw_base) {
3318 err = -ENODEV;
3319 goto out_pci_release_regions;
3320 }
3321
3322 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
3323 (unsigned long long) pci_resource_len(pdev, 0));
3324 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3325
3326	/* These spin locks are used in apm_ops.init and during EEPROM access,
3327	 * so they must be initialized now.
3328	 */
3329 spin_lock_init(&priv->reg_lock);
3330 spin_lock_init(&priv->lock);
3331
3332 /*
3333 * stop and reset the on-board processor just in case it is in a
3334 * strange state ... like being left stranded by a primary kernel
3335 * and this is now the kdump kernel trying to start up
3336 */
3337 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3338
3339 iwl4965_hw_detect(priv);
3340 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3341 priv->cfg->name, priv->hw_rev);
3342
3343 /* We disable the RETRY_TIMEOUT register (0x41) to keep
3344 * PCI Tx retries from interfering with C3 CPU state */
3345 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3346
3347 iwl4965_prepare_card_hw(priv);
3348	if (!priv->hw_ready) {
3349		IWL_WARN(priv, "Failed, HW not ready\n");
3350		err = -EIO;
3351		goto out_iounmap;
3352	}
3353 /*****************
3354 * 4. Read EEPROM
3355 *****************/
3356 /* Read the EEPROM */
3357 err = iwl_legacy_eeprom_init(priv);
3358 if (err) {
3359 IWL_ERR(priv, "Unable to init EEPROM\n");
3360 goto out_iounmap;
3361 }
3362 err = iwl4965_eeprom_check_version(priv);
3363 if (err)
3364 goto out_free_eeprom;
3365
3369 /* extract MAC Address */
3370 iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
3371 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3372 priv->hw->wiphy->addresses = priv->addresses;
3373 priv->hw->wiphy->n_addresses = 1;
3374
3375 /************************
3376 * 5. Setup HW constants
3377 ************************/
3378 if (iwl4965_set_hw_params(priv)) {
3379 IWL_ERR(priv, "failed to set hw parameters\n");
3380 goto out_free_eeprom;
3381 }
3382
3383 /*******************
3384 * 6. Setup priv
3385 *******************/
3386
3387 err = iwl4965_init_drv(priv);
3388 if (err)
3389 goto out_free_eeprom;
3390 /* At this point both hw and priv are initialized. */
3391
3392 /********************
3393 * 7. Setup services
3394 ********************/
3395 spin_lock_irqsave(&priv->lock, flags);
3396 iwl_legacy_disable_interrupts(priv);
3397 spin_unlock_irqrestore(&priv->lock, flags);
3398
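	/*
	 * MSI is optional: if pci_enable_msi() fails, priv->pci_dev->irq
	 * still refers to the legacy INTx vector and request_irq() below
	 * simply uses that.
	 */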
3399 pci_enable_msi(priv->pci_dev);
3400
3401 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3402 IRQF_SHARED, DRV_NAME, priv);
3403 if (err) {
3404 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3405 goto out_disable_msi;
3406 }
3407
3408 iwl4965_setup_deferred_work(priv);
3409 iwl4965_setup_rx_handlers(priv);
3410
3411 /*********************************************
3412 * 8. Enable interrupts and read RFKILL state
3413 *********************************************/
3414
3415	/* enable interrupts if needed: hw bug work-around */
3416 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3417 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3418 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3419 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3420 }
3421
3422 iwl_legacy_enable_interrupts(priv);
3423
3424 /* If platform's RF_KILL switch is NOT set to KILL */
3425 if (iwl_read32(priv, CSR_GP_CNTRL) &
3426 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3427 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3428 else
3429 set_bit(STATUS_RF_KILL_HW, &priv->status);
3430
3431 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3432 test_bit(STATUS_RF_KILL_HW, &priv->status));
3433
3434 iwl_legacy_power_initialize(priv);
3435
3436 init_completion(&priv->_4965.firmware_loading_complete);
3437
3438 err = iwl4965_request_firmware(priv, true);
3439 if (err)
3440 goto out_destroy_workqueue;
3441
3442 return 0;
3443
3444 out_destroy_workqueue:
3445 destroy_workqueue(priv->workqueue);
3446 priv->workqueue = NULL;
3447 free_irq(priv->pci_dev->irq, priv);
3448 out_disable_msi:
3449 pci_disable_msi(priv->pci_dev);
3450 iwl4965_uninit_drv(priv);
3451 out_free_eeprom:
3452 iwl_legacy_eeprom_free(priv);
3453 out_iounmap:
3454 pci_iounmap(pdev, priv->hw_base);
3455 out_pci_release_regions:
3456 pci_set_drvdata(pdev, NULL);
3457 pci_release_regions(pdev);
3458 out_pci_disable_device:
3459 pci_disable_device(pdev);
3460 out_ieee80211_free_hw:
3461 iwl_legacy_free_traffic_mem(priv);
3462 ieee80211_free_hw(priv->hw);
3463 out:
3464 return err;
3465}
3466
3467static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
3468{
3469 struct iwl_priv *priv = pci_get_drvdata(pdev);
3470 unsigned long flags;
3471
3472 if (!priv)
3473 return;
3474
3475 wait_for_completion(&priv->_4965.firmware_loading_complete);
3476
3477 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3478
3479 iwl_legacy_dbgfs_unregister(priv);
3480 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
3481
3482	/* The ieee80211_unregister_hw call will cause iwl_mac_stop to
3483	 * be called and, in turn, iwl4965_down. Since we are removing
3484	 * the device we need to set the STATUS_EXIT_PENDING bit first.
3485	 */
3486 set_bit(STATUS_EXIT_PENDING, &priv->status);
3487
3488 iwl_legacy_leds_exit(priv);
3489
3490 if (priv->mac80211_registered) {
3491 ieee80211_unregister_hw(priv->hw);
3492 priv->mac80211_registered = 0;
3493 } else {
3494 iwl4965_down(priv);
3495 }
3496
3497 /*
3498 * Make sure device is reset to low power before unloading driver.
3499 * This may be redundant with iwl4965_down(), but there are paths to
3500 * run iwl4965_down() without calling apm_ops.stop(), and there are
3501 * paths to avoid running iwl4965_down() at all before leaving driver.
3502 * This (inexpensive) call *makes sure* device is reset.
3503 */
3504 iwl_legacy_apm_stop(priv);
3505
3506 /* make sure we flush any pending irq or
3507 * tasklet for the driver
3508 */
3509 spin_lock_irqsave(&priv->lock, flags);
3510 iwl_legacy_disable_interrupts(priv);
3511 spin_unlock_irqrestore(&priv->lock, flags);
3512
3513 iwl4965_synchronize_irq(priv);
3514
3515 iwl4965_dealloc_ucode_pci(priv);
3516
3517 if (priv->rxq.bd)
3518 iwl4965_rx_queue_free(priv, &priv->rxq);
3519 iwl4965_hw_txq_ctx_free(priv);
3520
3521 iwl_legacy_eeprom_free(priv);
3522
3523
3524 /*netif_stop_queue(dev); */
3525 flush_workqueue(priv->workqueue);
3526
3527 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
3528 * priv->workqueue... so we can't take down the workqueue
3529 * until now... */
3530 destroy_workqueue(priv->workqueue);
3531 priv->workqueue = NULL;
3532 iwl_legacy_free_traffic_mem(priv);
3533
3534 free_irq(priv->pci_dev->irq, priv);
3535 pci_disable_msi(priv->pci_dev);
3536 pci_iounmap(pdev, priv->hw_base);
3537 pci_release_regions(pdev);
3538 pci_disable_device(pdev);
3539 pci_set_drvdata(pdev, NULL);
3540
3541 iwl4965_uninit_drv(priv);
3542
3543 dev_kfree_skb(priv->beacon_skb);
3544
3545 ieee80211_free_hw(priv->hw);
3546}
3547
3548/*
3549 * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask.
3550 * Must be called with priv->lock held and MAC access granted.
3551 */
3552void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
3553{
3554 iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
3555}
3556
3557/*****************************************************************************
3558 *
3559 * driver and module entry point
3560 *
3561 *****************************************************************************/
3562
3563/* Hardware specific file defines the PCI IDs table for that hardware module */
3564static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
3565#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
3566 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
3567 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
3568#endif /* CONFIG_IWL4965 */
3569
3570 {0}
3571};
3572MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
3573
3574static struct pci_driver iwl4965_driver = {
3575 .name = DRV_NAME,
3576 .id_table = iwl4965_hw_card_ids,
3577 .probe = iwl4965_pci_probe,
3578 .remove = __devexit_p(iwl4965_pci_remove),
3579 .driver.pm = IWL_LEGACY_PM_OPS,
3580};
3581
3582static int __init iwl4965_init(void)
3583{
3584
3585 int ret;
3586 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3587 pr_info(DRV_COPYRIGHT "\n");
3588
3589 ret = iwl4965_rate_control_register();
3590 if (ret) {
3591 pr_err("Unable to register rate control algorithm: %d\n", ret);
3592 return ret;
3593 }
3594
3595 ret = pci_register_driver(&iwl4965_driver);
3596 if (ret) {
3597 pr_err("Unable to initialize PCI module\n");
3598 goto error_register;
3599 }
3600
3601 return ret;
3602
3603error_register:
3604 iwl4965_rate_control_unregister();
3605 return ret;
3606}
3607
3608static void __exit iwl4965_exit(void)
3609{
3610 pci_unregister_driver(&iwl4965_driver);
3611 iwl4965_rate_control_unregister();
3612}
3613
3614module_exit(iwl4965_exit);
3615module_init(iwl4965_init);
3616
3617#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3618module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
3619MODULE_PARM_DESC(debug, "debug output mask");
3620#endif
3621
3622module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
3623MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
3624module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
3625MODULE_PARM_DESC(queues_num, "number of hw queues.");
3626module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
3627MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
3628module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
3629 int, S_IRUGO);
3630MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3631module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
3632MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");