author    Kalle Valo <kvalo@codeaurora.org>  2015-11-17 13:37:11 -0500
committer Kalle Valo <kvalo@codeaurora.org>  2015-11-18 07:28:30 -0500
commit    7ac9a364c1721a863ecc6cc9aba66e10114908db (patch)
tree      2b83df4b655ea3494e95d9adb8d89ff539ecd688 /drivers/net/wireless/intel
parent    367a1092b555f4372a556ddb53970d25061c74d1 (diff)
iwlegacy: move under intel directory
Part of reorganising wireless drivers directory and Kconfig.

Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Diffstat (limited to 'drivers/net/wireless/intel')
-rw-r--r--  drivers/net/wireless/intel/Kconfig                 |    1
-rw-r--r--  drivers/net/wireless/intel/Makefile                |    2
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945-debug.c   |  511
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945-mac.c     | 3959
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945-rs.c      |  979
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945.c         | 2741
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945.h         |  593
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965-calib.c   |  934
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965-debug.c   |  752
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965-mac.c     | 6868
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965-rs.c      | 2835
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965.c         | 1950
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965.h         | 1285
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/Kconfig        |  100
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/Makefile       |   17
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/commands.h     | 3370
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/common.c       | 5586
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/common.h       | 3084
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/csr.h          |  419
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/debug.c        | 1426
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h |   92
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/prph.h         |  522
22 files changed, 38026 insertions, 0 deletions
diff --git a/drivers/net/wireless/intel/Kconfig b/drivers/net/wireless/intel/Kconfig
index 3f8eacc4209a..0a7cd61e528c 100644
--- a/drivers/net/wireless/intel/Kconfig
+++ b/drivers/net/wireless/intel/Kconfig
@@ -12,5 +12,6 @@ config WLAN_VENDOR_INTEL
 if WLAN_VENDOR_INTEL
 
 source "drivers/net/wireless/intel/ipw2x00/Kconfig"
+source "drivers/net/wireless/intel/iwlegacy/Kconfig"
 
 endif # WLAN_VENDOR_INTEL
diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile
index 8e5dcb2d425e..cec507d3c6bf 100644
--- a/drivers/net/wireless/intel/Makefile
+++ b/drivers/net/wireless/intel/Makefile
@@ -1,2 +1,4 @@
 obj-$(CONFIG_IPW2100) += ipw2x00/
 obj-$(CONFIG_IPW2200) += ipw2x00/
+
+obj-$(CONFIG_IWLEGACY) += iwlegacy/
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-debug.c b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
new file mode 100644
index 000000000000..c1b4441fb8b2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
@@ -0,0 +1,511 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "common.h"
30#include "3945.h"
31
32static int
33il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
34{
35 int p = 0;
36
37 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
38 le32_to_cpu(il->_3945.stats.flag));
39 if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(il->_3945.stats.flag) &
44 UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
45 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
46 (le32_to_cpu(il->_3945.stats.flag) &
47 UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
48 return p;
49}
50
51static ssize_t
52il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
53 size_t count, loff_t *ppos)
54{
55 struct il_priv *il = file->private_data;
56 int pos = 0;
57 char *buf;
58 int bufsz =
59 sizeof(struct iwl39_stats_rx_phy) * 40 +
60 sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
61 ssize_t ret;
62 struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
63 struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
64 struct iwl39_stats_rx_non_phy *general, *accum_general;
65 struct iwl39_stats_rx_non_phy *delta_general, *max_general;
66
67 if (!il_is_alive(il))
68 return -EAGAIN;
69
70 buf = kzalloc(bufsz, GFP_KERNEL);
71 if (!buf) {
72 IL_ERR("Can not allocate Buffer\n");
73 return -ENOMEM;
74 }
75
76 /*
77 * The statistic information display here is based on
78 * the last stats notification from uCode
79 * might not reflect the current uCode activity
80 */
81 ofdm = &il->_3945.stats.rx.ofdm;
82 cck = &il->_3945.stats.rx.cck;
83 general = &il->_3945.stats.rx.general;
84 accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
85 accum_cck = &il->_3945.accum_stats.rx.cck;
86 accum_general = &il->_3945.accum_stats.rx.general;
87 delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
88 delta_cck = &il->_3945.delta_stats.rx.cck;
89 delta_general = &il->_3945.delta_stats.rx.general;
90 max_ofdm = &il->_3945.max_delta.rx.ofdm;
91 max_cck = &il->_3945.max_delta.rx.cck;
92 max_general = &il->_3945.max_delta.rx.general;
93
94 pos += il3945_stats_flag(il, buf, bufsz);
95 pos +=
96 scnprintf(buf + pos, bufsz - pos,
97 "%-32s current"
98 "acumulative delta max\n",
99 "Statistics_Rx - OFDM:");
100 pos +=
101 scnprintf(buf + pos, bufsz - pos,
102 " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
103 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
104 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
105 pos +=
106 scnprintf(buf + pos, bufsz - pos,
107 " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
108 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
109 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
110 pos +=
111 scnprintf(buf + pos, bufsz - pos,
112 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
113 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
114 delta_ofdm->plcp_err, max_ofdm->plcp_err);
115 pos +=
116 scnprintf(buf + pos, bufsz - pos,
117 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
118 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
119 delta_ofdm->crc32_err, max_ofdm->crc32_err);
120 pos +=
121 scnprintf(buf + pos, bufsz - pos,
122 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
123 le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
124 delta_ofdm->overrun_err, max_ofdm->overrun_err);
125 pos +=
126 scnprintf(buf + pos, bufsz - pos,
127 " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
128 le32_to_cpu(ofdm->early_overrun_err),
129 accum_ofdm->early_overrun_err,
130 delta_ofdm->early_overrun_err,
131 max_ofdm->early_overrun_err);
132 pos +=
133 scnprintf(buf + pos, bufsz - pos,
134 " %-30s %10u %10u %10u %10u\n", "crc32_good:",
135 le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
136 delta_ofdm->crc32_good, max_ofdm->crc32_good);
137 pos +=
138 scnprintf(buf + pos, bufsz - pos,
139 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
140 le32_to_cpu(ofdm->false_alarm_cnt),
141 accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
142 max_ofdm->false_alarm_cnt);
143 pos +=
144 scnprintf(buf + pos, bufsz - pos,
145 " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
146 le32_to_cpu(ofdm->fina_sync_err_cnt),
147 accum_ofdm->fina_sync_err_cnt,
148 delta_ofdm->fina_sync_err_cnt,
149 max_ofdm->fina_sync_err_cnt);
150 pos +=
151 scnprintf(buf + pos, bufsz - pos,
152 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
153 le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
154 delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
155 pos +=
156 scnprintf(buf + pos, bufsz - pos,
157 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
159 delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
160 pos +=
161 scnprintf(buf + pos, bufsz - pos,
162 " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
163 le32_to_cpu(ofdm->unresponded_rts),
164 accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
165 max_ofdm->unresponded_rts);
166 pos +=
167 scnprintf(buf + pos, bufsz - pos,
168 " %-30s %10u %10u %10u %10u\n",
169 "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos +=
175 scnprintf(buf + pos, bufsz - pos,
176 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
177 le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
178 delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
179 pos +=
180 scnprintf(buf + pos, bufsz - pos,
181 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
182 le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
183 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
184
185 pos +=
186 scnprintf(buf + pos, bufsz - pos,
187 "%-32s current"
188 "acumulative delta max\n",
189 "Statistics_Rx - CCK:");
190 pos +=
191 scnprintf(buf + pos, bufsz - pos,
192 " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
193 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
194 delta_cck->ina_cnt, max_cck->ina_cnt);
195 pos +=
196 scnprintf(buf + pos, bufsz - pos,
197 " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
198 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
199 delta_cck->fina_cnt, max_cck->fina_cnt);
200 pos +=
201 scnprintf(buf + pos, bufsz - pos,
202 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
203 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
204 delta_cck->plcp_err, max_cck->plcp_err);
205 pos +=
206 scnprintf(buf + pos, bufsz - pos,
207 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
208 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
209 delta_cck->crc32_err, max_cck->crc32_err);
210 pos +=
211 scnprintf(buf + pos, bufsz - pos,
212 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
213 le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
214 delta_cck->overrun_err, max_cck->overrun_err);
215 pos +=
216 scnprintf(buf + pos, bufsz - pos,
217 " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
218 le32_to_cpu(cck->early_overrun_err),
219 accum_cck->early_overrun_err,
220 delta_cck->early_overrun_err, max_cck->early_overrun_err);
221 pos +=
222 scnprintf(buf + pos, bufsz - pos,
223 " %-30s %10u %10u %10u %10u\n", "crc32_good:",
224 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
225 delta_cck->crc32_good, max_cck->crc32_good);
226 pos +=
227 scnprintf(buf + pos, bufsz - pos,
228 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
229 le32_to_cpu(cck->false_alarm_cnt),
230 accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
231 max_cck->false_alarm_cnt);
232 pos +=
233 scnprintf(buf + pos, bufsz - pos,
234 " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
235 le32_to_cpu(cck->fina_sync_err_cnt),
236 accum_cck->fina_sync_err_cnt,
237 delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
238 pos +=
239 scnprintf(buf + pos, bufsz - pos,
240 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
241 le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
242 delta_cck->sfd_timeout, max_cck->sfd_timeout);
243 pos +=
244 scnprintf(buf + pos, bufsz - pos,
245 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
246 le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
247 delta_cck->fina_timeout, max_cck->fina_timeout);
248 pos +=
249 scnprintf(buf + pos, bufsz - pos,
250 " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
251 le32_to_cpu(cck->unresponded_rts),
252 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
253 max_cck->unresponded_rts);
254 pos +=
255 scnprintf(buf + pos, bufsz - pos,
256 " %-30s %10u %10u %10u %10u\n",
257 "rxe_frame_lmt_ovrun:",
258 le32_to_cpu(cck->rxe_frame_limit_overrun),
259 accum_cck->rxe_frame_limit_overrun,
260 delta_cck->rxe_frame_limit_overrun,
261 max_cck->rxe_frame_limit_overrun);
262 pos +=
263 scnprintf(buf + pos, bufsz - pos,
264 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
265 le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
266 delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
267 pos +=
268 scnprintf(buf + pos, bufsz - pos,
269 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
270 le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
271 delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
272
273 pos +=
274 scnprintf(buf + pos, bufsz - pos,
275 "%-32s current"
276 "acumulative delta max\n",
277 "Statistics_Rx - GENERAL:");
278 pos +=
279 scnprintf(buf + pos, bufsz - pos,
280 " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
281 le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
282 delta_general->bogus_cts, max_general->bogus_cts);
283 pos +=
284 scnprintf(buf + pos, bufsz - pos,
285 " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
286 le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
287 delta_general->bogus_ack, max_general->bogus_ack);
288 pos +=
289 scnprintf(buf + pos, bufsz - pos,
290 " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:",
291 le32_to_cpu(general->non_bssid_frames),
292 accum_general->non_bssid_frames,
293 delta_general->non_bssid_frames,
294 max_general->non_bssid_frames);
295 pos +=
296 scnprintf(buf + pos, bufsz - pos,
297 " %-30s %10u %10u %10u %10u\n", "filtered_frames:",
298 le32_to_cpu(general->filtered_frames),
299 accum_general->filtered_frames,
300 delta_general->filtered_frames,
301 max_general->filtered_frames);
302 pos +=
303 scnprintf(buf + pos, bufsz - pos,
304 " %-30s %10u %10u %10u %10u\n",
305 "non_channel_beacons:",
306 le32_to_cpu(general->non_channel_beacons),
307 accum_general->non_channel_beacons,
308 delta_general->non_channel_beacons,
309 max_general->non_channel_beacons);
310
311 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
312 kfree(buf);
313 return ret;
314}
315
316static ssize_t
317il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
318 size_t count, loff_t *ppos)
319{
320 struct il_priv *il = file->private_data;
321 int pos = 0;
322 char *buf;
323 int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
324 ssize_t ret;
325 struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
326
327 if (!il_is_alive(il))
328 return -EAGAIN;
329
330 buf = kzalloc(bufsz, GFP_KERNEL);
331 if (!buf) {
332 IL_ERR("Can not allocate Buffer\n");
333 return -ENOMEM;
334 }
335
336 /*
337 * The statistic information display here is based on
338 * the last stats notification from uCode
339 * might not reflect the current uCode activity
340 */
341 tx = &il->_3945.stats.tx;
342 accum_tx = &il->_3945.accum_stats.tx;
343 delta_tx = &il->_3945.delta_stats.tx;
344 max_tx = &il->_3945.max_delta.tx;
345 pos += il3945_stats_flag(il, buf, bufsz);
346 pos +=
347 scnprintf(buf + pos, bufsz - pos,
348 "%-32s current"
349 "acumulative delta max\n",
350 "Statistics_Tx:");
351 pos +=
352 scnprintf(buf + pos, bufsz - pos,
353 " %-30s %10u %10u %10u %10u\n", "preamble:",
354 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
355 delta_tx->preamble_cnt, max_tx->preamble_cnt);
356 pos +=
357 scnprintf(buf + pos, bufsz - pos,
358 " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
359 le32_to_cpu(tx->rx_detected_cnt),
360 accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
361 max_tx->rx_detected_cnt);
362 pos +=
363 scnprintf(buf + pos, bufsz - pos,
364 " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
365 le32_to_cpu(tx->bt_prio_defer_cnt),
366 accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
367 max_tx->bt_prio_defer_cnt);
368 pos +=
369 scnprintf(buf + pos, bufsz - pos,
370 " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
371 le32_to_cpu(tx->bt_prio_kill_cnt),
372 accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
373 max_tx->bt_prio_kill_cnt);
374 pos +=
375 scnprintf(buf + pos, bufsz - pos,
376 " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
377 le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
378 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
379 pos +=
380 scnprintf(buf + pos, bufsz - pos,
381 " %-30s %10u %10u %10u %10u\n", "cts_timeout:",
382 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
383 delta_tx->cts_timeout, max_tx->cts_timeout);
384 pos +=
385 scnprintf(buf + pos, bufsz - pos,
386 " %-30s %10u %10u %10u %10u\n", "ack_timeout:",
387 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
388 delta_tx->ack_timeout, max_tx->ack_timeout);
389 pos +=
390 scnprintf(buf + pos, bufsz - pos,
391 " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
392 le32_to_cpu(tx->expected_ack_cnt),
393 accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
394 max_tx->expected_ack_cnt);
395 pos +=
396 scnprintf(buf + pos, bufsz - pos,
397 " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
398 le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
399 delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
400
401 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
402 kfree(buf);
403 return ret;
404}
405
406static ssize_t
407il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
408 size_t count, loff_t *ppos)
409{
410 struct il_priv *il = file->private_data;
411 int pos = 0;
412 char *buf;
413 int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
414 ssize_t ret;
415 struct iwl39_stats_general *general, *accum_general;
416 struct iwl39_stats_general *delta_general, *max_general;
417 struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
418 struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
419
420 if (!il_is_alive(il))
421 return -EAGAIN;
422
423 buf = kzalloc(bufsz, GFP_KERNEL);
424 if (!buf) {
425 IL_ERR("Can not allocate Buffer\n");
426 return -ENOMEM;
427 }
428
429 /*
430 * The statistic information display here is based on
431 * the last stats notification from uCode
432 * might not reflect the current uCode activity
433 */
434 general = &il->_3945.stats.general;
435 dbg = &il->_3945.stats.general.dbg;
436 div = &il->_3945.stats.general.div;
437 accum_general = &il->_3945.accum_stats.general;
438 delta_general = &il->_3945.delta_stats.general;
439 max_general = &il->_3945.max_delta.general;
440 accum_dbg = &il->_3945.accum_stats.general.dbg;
441 delta_dbg = &il->_3945.delta_stats.general.dbg;
442 max_dbg = &il->_3945.max_delta.general.dbg;
443 accum_div = &il->_3945.accum_stats.general.div;
444 delta_div = &il->_3945.delta_stats.general.div;
445 max_div = &il->_3945.max_delta.general.div;
446 pos += il3945_stats_flag(il, buf, bufsz);
447 pos +=
448 scnprintf(buf + pos, bufsz - pos,
449 "%-32s current"
450 "acumulative delta max\n",
451 "Statistics_General:");
452 pos +=
453 scnprintf(buf + pos, bufsz - pos,
454 " %-30s %10u %10u %10u %10u\n", "burst_check:",
455 le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
456 delta_dbg->burst_check, max_dbg->burst_check);
457 pos +=
458 scnprintf(buf + pos, bufsz - pos,
459 " %-30s %10u %10u %10u %10u\n", "burst_count:",
460 le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
461 delta_dbg->burst_count, max_dbg->burst_count);
462 pos +=
463 scnprintf(buf + pos, bufsz - pos,
464 " %-30s %10u %10u %10u %10u\n", "sleep_time:",
465 le32_to_cpu(general->sleep_time),
466 accum_general->sleep_time, delta_general->sleep_time,
467 max_general->sleep_time);
468 pos +=
469 scnprintf(buf + pos, bufsz - pos,
470 " %-30s %10u %10u %10u %10u\n", "slots_out:",
471 le32_to_cpu(general->slots_out), accum_general->slots_out,
472 delta_general->slots_out, max_general->slots_out);
473 pos +=
474 scnprintf(buf + pos, bufsz - pos,
475 " %-30s %10u %10u %10u %10u\n", "slots_idle:",
476 le32_to_cpu(general->slots_idle),
477 accum_general->slots_idle, delta_general->slots_idle,
478 max_general->slots_idle);
479 pos +=
480 scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
481 le32_to_cpu(general->ttl_timestamp));
482 pos +=
483 scnprintf(buf + pos, bufsz - pos,
484 " %-30s %10u %10u %10u %10u\n", "tx_on_a:",
485 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
486 delta_div->tx_on_a, max_div->tx_on_a);
487 pos +=
488 scnprintf(buf + pos, bufsz - pos,
489 " %-30s %10u %10u %10u %10u\n", "tx_on_b:",
490 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
491 delta_div->tx_on_b, max_div->tx_on_b);
492 pos +=
493 scnprintf(buf + pos, bufsz - pos,
494 " %-30s %10u %10u %10u %10u\n", "exec_time:",
495 le32_to_cpu(div->exec_time), accum_div->exec_time,
496 delta_div->exec_time, max_div->exec_time);
497 pos +=
498 scnprintf(buf + pos, bufsz - pos,
499 " %-30s %10u %10u %10u %10u\n", "probe_time:",
500 le32_to_cpu(div->probe_time), accum_div->probe_time,
501 delta_div->probe_time, max_div->probe_time);
502 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
503 kfree(buf);
504 return ret;
505}
506
507const struct il_debugfs_ops il3945_debugfs_ops = {
508 .rx_stats_read = il3945_ucode_rx_stats_read,
509 .tx_stats_read = il3945_ucode_tx_stats_read,
510 .general_stats_read = il3945_ucode_general_stats_read,
511};
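
The three readers above share one debugfs pattern: format everything into a kzalloc'd buffer with scnprintf(), hand the formatted region to user space with simple_read_from_buffer(), then free the buffer. A minimal sketch of that pattern follows; the example_* names, the struct, and the buffer size are hypothetical, and only the kernel helpers themselves (kzalloc, scnprintf, simple_read_from_buffer, simple_open) are the real APIs used in the code above. In the driver these callbacks are instead exported through il3945_debugfs_ops, presumably consumed by the common debugfs code in debug.c (also added by this patch but not shown here).

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical private data; the driver uses struct il_priv instead. */
struct example_priv {
	u32 rx_packets;
};

static ssize_t
example_stats_read(struct file *file, char __user *user_buf,
		   size_t count, loff_t *ppos)
{
	struct example_priv *priv = file->private_data;
	const int bufsz = 128;	/* assumed; sized for the output below */
	char *buf;
	int pos = 0;
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Append formatted lines; scnprintf() never writes past bufsz - pos. */
	pos += scnprintf(buf + pos, bufsz - pos, "rx_packets:\t%u\n",
			 priv->rx_packets);

	/* Copy the formatted region to user space, honouring *ppos. */
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static const struct file_operations example_stats_fops = {
	.read	= example_stats_read,
	.open	= simple_open,	/* sets file->private_data from i_private */
	.llseek	= default_llseek,
};
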
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
new file mode 100644
index 000000000000..af1b3e6839fa
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -0,0 +1,3959 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/ieee80211_radiotap.h>
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl3945"
53
54#include "commands.h"
55#include "common.h"
56#include "3945.h"
57#include "iwl-spectrum.h"
58
59/*
60 * module name, copyright, version, etc.
61 */
62
63#define DRV_DESCRIPTION \
64"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
65
66#ifdef CONFIG_IWLEGACY_DEBUG
67#define VD "d"
68#else
69#define VD
70#endif
71
72/*
73 * add "s" to indicate spectrum measurement included.
74 * we add it here to be consistent with previous releases in which
75 * this was configurable.
76 */
77#define DRV_VERSION IWLWIFI_VERSION VD "s"
78#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
79#define DRV_AUTHOR "<ilw@linux.intel.com>"
80
81MODULE_DESCRIPTION(DRV_DESCRIPTION);
82MODULE_VERSION(DRV_VERSION);
83MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
84MODULE_LICENSE("GPL");
85
86 /* module parameters */
87struct il_mod_params il3945_mod_params = {
88 .sw_crypto = 1,
89 .restart_fw = 1,
90 .disable_hw_scan = 1,
91 /* the rest are 0 by default */
92};
93
94/**
95 * il3945_get_antenna_flags - Get antenna flags for RXON command
96 * @il: eeprom and antenna fields are used to determine antenna flags
97 *
98 * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed
99 * il3945_mod_params.antenna specifies the antenna diversity mode:
100 *
101 * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
102 * IL_ANTENNA_MAIN - Force MAIN antenna
103 * IL_ANTENNA_AUX - Force AUX antenna
104 */
105__le32
106il3945_get_antenna_flags(const struct il_priv *il)
107{
108 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
109
110 switch (il3945_mod_params.antenna) {
111 case IL_ANTENNA_DIVERSITY:
112 return 0;
113
114 case IL_ANTENNA_MAIN:
115 if (eeprom->antenna_switch_type)
116 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
117 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
118
119 case IL_ANTENNA_AUX:
120 if (eeprom->antenna_switch_type)
121 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
122 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
123 }
124
125 /* bad antenna selector value */
126 IL_ERR("Bad antenna selector value (0x%x)\n",
127 il3945_mod_params.antenna);
128
129 return 0; /* "diversity" is default if error */
130}
131
132static int
133il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
134 struct ieee80211_key_conf *keyconf, u8 sta_id)
135{
136 unsigned long flags;
137 __le16 key_flags = 0;
138 int ret;
139
140 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
141 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
142
143 if (sta_id == il->hw_params.bcast_id)
144 key_flags |= STA_KEY_MULTICAST_MSK;
145
146 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
147 keyconf->hw_key_idx = keyconf->keyidx;
148 key_flags &= ~STA_KEY_FLG_INVALID;
149
150 spin_lock_irqsave(&il->sta_lock, flags);
151 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
152 il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
153 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
154
155 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
156
157 if ((il->stations[sta_id].sta.key.
158 key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
159 il->stations[sta_id].sta.key.key_offset =
160 il_get_free_ucode_key_idx(il);
161 /* else, we are overriding an existing key => no need to allocated room
162 * in uCode. */
163
164 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
165 "no space for a new key");
166
167 il->stations[sta_id].sta.key.key_flags = key_flags;
168 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
169 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
170
171 D_INFO("hwcrypto: modify ucode station key info\n");
172
173 ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
174
175 spin_unlock_irqrestore(&il->sta_lock, flags);
176
177 return ret;
178}
179
180static int
181il3945_set_tkip_dynamic_key_info(struct il_priv *il,
182 struct ieee80211_key_conf *keyconf, u8 sta_id)
183{
184 return -EOPNOTSUPP;
185}
186
187static int
188il3945_set_wep_dynamic_key_info(struct il_priv *il,
189 struct ieee80211_key_conf *keyconf, u8 sta_id)
190{
191 return -EOPNOTSUPP;
192}
193
194static int
195il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
196{
197 unsigned long flags;
198 struct il_addsta_cmd sta_cmd;
199
200 spin_lock_irqsave(&il->sta_lock, flags);
201 memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
202 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
203 il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
204 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
205 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
206 memcpy(&sta_cmd, &il->stations[sta_id].sta,
207 sizeof(struct il_addsta_cmd));
208 spin_unlock_irqrestore(&il->sta_lock, flags);
209
210 D_INFO("hwcrypto: clear ucode station key info\n");
211 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
212}
213
214static int
215il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
216 u8 sta_id)
217{
218 int ret = 0;
219
220 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
221
222 switch (keyconf->cipher) {
223 case WLAN_CIPHER_SUITE_CCMP:
224 ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
225 break;
226 case WLAN_CIPHER_SUITE_TKIP:
227 ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
228 break;
229 case WLAN_CIPHER_SUITE_WEP40:
230 case WLAN_CIPHER_SUITE_WEP104:
231 ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
232 break;
233 default:
234 IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
235 ret = -EINVAL;
236 }
237
238 D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
239 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
240
241 return ret;
242}
243
244static int
245il3945_remove_static_key(struct il_priv *il)
246{
247 int ret = -EOPNOTSUPP;
248
249 return ret;
250}
251
252static int
253il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
254{
255 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
256 key->cipher == WLAN_CIPHER_SUITE_WEP104)
257 return -EOPNOTSUPP;
258
259 IL_ERR("Static key invalid: cipher %x\n", key->cipher);
260 return -EINVAL;
261}
262
263static void
264il3945_clear_free_frames(struct il_priv *il)
265{
266 struct list_head *element;
267
268 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
269
270 while (!list_empty(&il->free_frames)) {
271 element = il->free_frames.next;
272 list_del(element);
273 kfree(list_entry(element, struct il3945_frame, list));
274 il->frames_count--;
275 }
276
277 if (il->frames_count) {
278 IL_WARN("%d frames still in use. Did we lose one?\n",
279 il->frames_count);
280 il->frames_count = 0;
281 }
282}
283
284static struct il3945_frame *
285il3945_get_free_frame(struct il_priv *il)
286{
287 struct il3945_frame *frame;
288 struct list_head *element;
289 if (list_empty(&il->free_frames)) {
290 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
291 if (!frame) {
292 IL_ERR("Could not allocate frame!\n");
293 return NULL;
294 }
295
296 il->frames_count++;
297 return frame;
298 }
299
300 element = il->free_frames.next;
301 list_del(element);
302 return list_entry(element, struct il3945_frame, list);
303}
304
305static void
306il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
307{
308 memset(frame, 0, sizeof(*frame));
309 list_add(&frame->list, &il->free_frames);
310}
311
312unsigned int
313il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
314 int left)
315{
316
317 if (!il_is_associated(il) || !il->beacon_skb)
318 return 0;
319
320 if (il->beacon_skb->len > left)
321 return 0;
322
323 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
324
325 return il->beacon_skb->len;
326}
327
328static int
329il3945_send_beacon_cmd(struct il_priv *il)
330{
331 struct il3945_frame *frame;
332 unsigned int frame_size;
333 int rc;
334 u8 rate;
335
336 frame = il3945_get_free_frame(il);
337
338 if (!frame) {
339 IL_ERR("Could not obtain free frame buffer for beacon "
340 "command.\n");
341 return -ENOMEM;
342 }
343
344 rate = il_get_lowest_plcp(il);
345
346 frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
347
348 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
349
350 il3945_free_frame(il, frame);
351
352 return rc;
353}
354
355static void
356il3945_unset_hw_params(struct il_priv *il)
357{
358 if (il->_3945.shared_virt)
359 dma_free_coherent(&il->pci_dev->dev,
360 sizeof(struct il3945_shared),
361 il->_3945.shared_virt, il->_3945.shared_phys);
362}
363
364static void
365il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
366 struct il_device_cmd *cmd,
367 struct sk_buff *skb_frag, int sta_id)
368{
369 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
370 struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;
371
372 tx_cmd->sec_ctl = 0;
373
374 switch (keyinfo->cipher) {
375 case WLAN_CIPHER_SUITE_CCMP:
376 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
377 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
378 D_TX("tx_cmd with AES hwcrypto\n");
379 break;
380
381 case WLAN_CIPHER_SUITE_TKIP:
382 break;
383
384 case WLAN_CIPHER_SUITE_WEP104:
385 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
386 /* fall through */
387 case WLAN_CIPHER_SUITE_WEP40:
388 tx_cmd->sec_ctl |=
389 TX_CMD_SEC_WEP | (info->control.hw_key->
390 hw_key_idx & TX_CMD_SEC_MSK) <<
391 TX_CMD_SEC_SHIFT;
392
393 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
394
395 D_TX("Configuring packet for WEP encryption " "with key %d\n",
396 info->control.hw_key->hw_key_idx);
397 break;
398
399 default:
400 IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
401 break;
402 }
403}
404
405/*
406 * handle build C_TX command notification.
407 */
408static void
409il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
410 struct ieee80211_tx_info *info,
411 struct ieee80211_hdr *hdr, u8 std_id)
412{
413 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
414 __le32 tx_flags = tx_cmd->tx_flags;
415 __le16 fc = hdr->frame_control;
416
417 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
418 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
419 tx_flags |= TX_CMD_FLG_ACK_MSK;
420 if (ieee80211_is_mgmt(fc))
421 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
422 if (ieee80211_is_probe_resp(fc) &&
423 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
424 tx_flags |= TX_CMD_FLG_TSF_MSK;
425 } else {
426 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
427 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
428 }
429
430 tx_cmd->sta_id = std_id;
431 if (ieee80211_has_morefrags(fc))
432 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
433
434 if (ieee80211_is_data_qos(fc)) {
435 u8 *qc = ieee80211_get_qos_ctl(hdr);
436 tx_cmd->tid_tspec = qc[0] & 0xf;
437 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
438 } else {
439 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
440 }
441
442 il_tx_cmd_protection(il, info, fc, &tx_flags);
443
444 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
445 if (ieee80211_is_mgmt(fc)) {
446 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
447 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
448 else
449 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
450 } else {
451 tx_cmd->timeout.pm_frame_timeout = 0;
452 }
453
454 tx_cmd->driver_txop = 0;
455 tx_cmd->tx_flags = tx_flags;
456 tx_cmd->next_frame_len = 0;
457}
458
459/*
460 * start C_TX command process
461 */
462static int
463il3945_tx_skb(struct il_priv *il,
464 struct ieee80211_sta *sta,
465 struct sk_buff *skb)
466{
467 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
469 struct il3945_tx_cmd *tx_cmd;
470 struct il_tx_queue *txq = NULL;
471 struct il_queue *q = NULL;
472 struct il_device_cmd *out_cmd;
473 struct il_cmd_meta *out_meta;
474 dma_addr_t phys_addr;
475 dma_addr_t txcmd_phys;
476 int txq_id = skb_get_queue_mapping(skb);
477 u16 len, idx, hdr_len;
478 u16 firstlen, secondlen;
479 u8 id;
480 u8 unicast;
481 u8 sta_id;
482 u8 tid = 0;
483 __le16 fc;
484 u8 wait_write_ptr = 0;
485 unsigned long flags;
486
487 spin_lock_irqsave(&il->lock, flags);
488 if (il_is_rfkill(il)) {
489 D_DROP("Dropping - RF KILL\n");
490 goto drop_unlock;
491 }
492
493 if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
494 IL_INVALID_RATE) {
495 IL_ERR("ERROR: No TX rate available.\n");
496 goto drop_unlock;
497 }
498
499 unicast = !is_multicast_ether_addr(hdr->addr1);
500 id = 0;
501
502 fc = hdr->frame_control;
503
504#ifdef CONFIG_IWLEGACY_DEBUG
505 if (ieee80211_is_auth(fc))
506 D_TX("Sending AUTH frame\n");
507 else if (ieee80211_is_assoc_req(fc))
508 D_TX("Sending ASSOC frame\n");
509 else if (ieee80211_is_reassoc_req(fc))
510 D_TX("Sending REASSOC frame\n");
511#endif
512
513 spin_unlock_irqrestore(&il->lock, flags);
514
515 hdr_len = ieee80211_hdrlen(fc);
516
517 /* Find idx into station table for destination station */
518 sta_id = il_sta_id_or_broadcast(il, sta);
519 if (sta_id == IL_INVALID_STATION) {
520 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
521 goto drop;
522 }
523
524 D_RATE("station Id %d\n", sta_id);
525
526 if (ieee80211_is_data_qos(fc)) {
527 u8 *qc = ieee80211_get_qos_ctl(hdr);
528 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
529 if (unlikely(tid >= MAX_TID_COUNT))
530 goto drop;
531 }
532
533 /* Descriptor for chosen Tx queue */
534 txq = &il->txq[txq_id];
535 q = &txq->q;
536
537 if ((il_queue_space(q) < q->high_mark))
538 goto drop;
539
540 spin_lock_irqsave(&il->lock, flags);
541
542 idx = il_get_cmd_idx(q, q->write_ptr, 0);
543
544 txq->skbs[q->write_ptr] = skb;
545
546 /* Init first empty entry in queue's array of Tx/cmd buffers */
547 out_cmd = txq->cmd[idx];
548 out_meta = &txq->meta[idx];
549 tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
550 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
551 memset(tx_cmd, 0, sizeof(*tx_cmd));
552
553 /*
554 * Set up the Tx-command (not MAC!) header.
555 * Store the chosen Tx queue and TFD idx within the sequence field;
556 * after Tx, uCode's Tx response will return this value so driver can
557 * locate the frame within the tx queue and do post-tx processing.
558 */
559 out_cmd->hdr.cmd = C_TX;
560 out_cmd->hdr.sequence =
561 cpu_to_le16((u16)
562 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
563
564 /* Copy MAC header from skb into command buffer */
565 memcpy(tx_cmd->hdr, hdr, hdr_len);
566
567 if (info->control.hw_key)
568 il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
569
570 /* TODO need this for burst mode later on */
571 il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
572
573 il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
574
575 /* Total # bytes to be transmitted */
576 tx_cmd->len = cpu_to_le16((u16) skb->len);
577
578 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
579 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
580
581 /*
582 * Use the first empty entry in this queue's command buffer array
583 * to contain the Tx command and MAC header concatenated together
584 * (payload data will be in another buffer).
585 * Size of this varies, due to varying MAC header length.
586 * If end is not dword aligned, we'll have 2 extra bytes at the end
587 * of the MAC header (device reads on dword boundaries).
588 * We'll tell device about this padding later.
589 */
590 len =
591 sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
592 hdr_len;
593 firstlen = (len + 3) & ~3;
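	/* illustrative: e.g. len = 54 rounds up to firstlen = 56, the next dword boundary */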
594
595 /* Physical address of this Tx command's header (not MAC header!),
596 * within command buffer array. */
597 txcmd_phys =
598 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
599 PCI_DMA_TODEVICE);
600 if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
601 goto drop_unlock;
602
603 /* Set up TFD's 2nd entry to point directly to remainder of skb,
604 * if any (802.11 null frames have no payload). */
605 secondlen = skb->len - hdr_len;
606 if (secondlen > 0) {
607 phys_addr =
608 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
609 PCI_DMA_TODEVICE);
610 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
611 goto drop_unlock;
612 }
613
614 /* Add buffer containing Tx command and MAC(!) header to TFD's
615 * first entry */
616 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
617 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
618 dma_unmap_len_set(out_meta, len, firstlen);
619 if (secondlen > 0)
620 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0,
621 U32_PAD(secondlen));
622
623 if (!ieee80211_has_morefrags(hdr->frame_control)) {
624 txq->need_update = 1;
625 } else {
626 wait_write_ptr = 1;
627 txq->need_update = 0;
628 }
629
630 il_update_stats(il, true, fc, skb->len);
631
632 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
633 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
634 il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
635 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
636 ieee80211_hdrlen(fc));
637
638 /* Tell device the write idx *just past* this latest filled TFD */
639 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
640 il_txq_update_write_ptr(il, txq);
641 spin_unlock_irqrestore(&il->lock, flags);
642
643 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
644 if (wait_write_ptr) {
645 spin_lock_irqsave(&il->lock, flags);
646 txq->need_update = 1;
647 il_txq_update_write_ptr(il, txq);
648 spin_unlock_irqrestore(&il->lock, flags);
649 }
650
651 il_stop_queue(il, txq);
652 }
653
654 return 0;
655
656drop_unlock:
657 spin_unlock_irqrestore(&il->lock, flags);
658drop:
659 return -1;
660}
661
662static int
663il3945_get_measurement(struct il_priv *il,
664 struct ieee80211_measurement_params *params, u8 type)
665{
666 struct il_spectrum_cmd spectrum;
667 struct il_rx_pkt *pkt;
668 struct il_host_cmd cmd = {
669 .id = C_SPECTRUM_MEASUREMENT,
670 .data = (void *)&spectrum,
671 .flags = CMD_WANT_SKB,
672 };
673 u32 add_time = le64_to_cpu(params->start_time);
674 int rc;
675 int spectrum_resp_status;
676 int duration = le16_to_cpu(params->duration);
677
678 if (il_is_associated(il))
679 add_time =
680 il_usecs_to_beacons(il,
681 le64_to_cpu(params->start_time) -
682 il->_3945.last_tsf,
683 le16_to_cpu(il->timing.beacon_interval));
684
685 memset(&spectrum, 0, sizeof(spectrum));
686
687 spectrum.channel_count = cpu_to_le16(1);
688 spectrum.flags =
689 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
690 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
691 cmd.len = sizeof(spectrum);
692 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
693
694 if (il_is_associated(il))
695 spectrum.start_time =
696 il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
697 le16_to_cpu(il->timing.beacon_interval));
698 else
699 spectrum.start_time = 0;
700
701 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
702 spectrum.channels[0].channel = params->channel;
703 spectrum.channels[0].type = type;
704 if (il->active.flags & RXON_FLG_BAND_24G_MSK)
705 spectrum.flags |=
706 RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
707 RXON_FLG_TGG_PROTECT_MSK;
708
709 rc = il_send_cmd_sync(il, &cmd);
710 if (rc)
711 return rc;
712
713 pkt = (struct il_rx_pkt *)cmd.reply_page;
714 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
715 IL_ERR("Bad return from N_RX_ON_ASSOC command\n");
716 rc = -EIO;
717 }
718
719 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
720 switch (spectrum_resp_status) {
721 case 0: /* Command will be handled */
722 if (pkt->u.spectrum.id != 0xff) {
723 D_INFO("Replaced existing measurement: %d\n",
724 pkt->u.spectrum.id);
725 il->measurement_status &= ~MEASUREMENT_READY;
726 }
727 il->measurement_status |= MEASUREMENT_ACTIVE;
728 rc = 0;
729 break;
730
731 case 1: /* Command will not be handled */
732 rc = -EAGAIN;
733 break;
734 }
735
736 il_free_pages(il, cmd.reply_page);
737
738 return rc;
739}
740
741static void
742il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
743{
744 struct il_rx_pkt *pkt = rxb_addr(rxb);
745 struct il_alive_resp *palive;
746 struct delayed_work *pwork;
747
748 palive = &pkt->u.alive_frame;
749
750 D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
751 palive->is_valid, palive->ver_type, palive->ver_subtype);
752
753 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
754 D_INFO("Initialization Alive received.\n");
755 memcpy(&il->card_alive_init, &pkt->u.alive_frame,
756 sizeof(struct il_alive_resp));
757 pwork = &il->init_alive_start;
758 } else {
759 D_INFO("Runtime Alive received.\n");
760 memcpy(&il->card_alive, &pkt->u.alive_frame,
761 sizeof(struct il_alive_resp));
762 pwork = &il->alive_start;
763 il3945_disable_events(il);
764 }
765
766 /* We delay the ALIVE response by 5ms to
767 * give the HW RF Kill time to activate... */
768 if (palive->is_valid == UCODE_VALID_OK)
769 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
770 else
771 IL_WARN("uCode did not respond OK.\n");
772}
773
774static void
775il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
776{
777#ifdef CONFIG_IWLEGACY_DEBUG
778 struct il_rx_pkt *pkt = rxb_addr(rxb);
779#endif
780
781 D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
782}
783
784static void
785il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
786{
787 struct il_rx_pkt *pkt = rxb_addr(rxb);
788 struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
789#ifdef CONFIG_IWLEGACY_DEBUG
790 u8 rate = beacon->beacon_notify_hdr.rate;
791
792 D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
793 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
794 beacon->beacon_notify_hdr.failure_frame,
795 le32_to_cpu(beacon->ibss_mgr_status),
796 le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
797#endif
798
799 il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
800
801}
802
803/* Handle notification from uCode that card's power state is changing
804 * due to software, hardware, or critical temperature RFKILL */
805static void
806il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
807{
808 struct il_rx_pkt *pkt = rxb_addr(rxb);
809 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
810 unsigned long status = il->status;
811
812 IL_WARN("Card state received: HW:%s SW:%s\n",
813 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
814 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
815
816 _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
817
818 if (flags & HW_CARD_DISABLED)
819 set_bit(S_RFKILL, &il->status);
820 else
821 clear_bit(S_RFKILL, &il->status);
822
823 il_scan_cancel(il);
824
825 if ((test_bit(S_RFKILL, &status) !=
826 test_bit(S_RFKILL, &il->status)))
827 wiphy_rfkill_set_hw_state(il->hw->wiphy,
828 test_bit(S_RFKILL, &il->status));
829 else
830 wake_up(&il->wait_command_queue);
831}
832
833/**
834 * il3945_setup_handlers - Initialize Rx handler callbacks
835 *
836 * Setup the RX handlers for each of the reply types sent from the uCode
837 * to the host.
838 *
839 * This function chains into the hardware specific files for them to setup
840 * any hardware specific handlers as well.
841 */
842static void
843il3945_setup_handlers(struct il_priv *il)
844{
845 il->handlers[N_ALIVE] = il3945_hdl_alive;
846 il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
847 il->handlers[N_ERROR] = il_hdl_error;
848 il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
849 il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
850 il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
851 il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
852 il->handlers[N_BEACON] = il3945_hdl_beacon;
853
854 /*
855 * The same handler is used for both the REPLY to a discrete
856 * stats request from the host as well as for the periodic
857 * stats notifications (after received beacons) from the uCode.
858 */
859 il->handlers[C_STATS] = il3945_hdl_c_stats;
860 il->handlers[N_STATS] = il3945_hdl_stats;
861
862 il_setup_rx_scan_handlers(il);
863 il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
864
865 /* Set up hardware specific Rx handlers */
866 il3945_hw_handler_setup(il);
867}
868
869/************************** RX-FUNCTIONS ****************************/
870/*
871 * Rx theory of operation
872 *
873 * The host allocates 32 DMA target addresses and passes the host address
874 * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
875 * 0 to 31
876 *
877 * Rx Queue Indexes
878 * The host/firmware share two idx registers for managing the Rx buffers.
879 *
880 * The READ idx maps to the first position that the firmware may be writing
881 * to -- the driver can read up to (but not including) this position and get
882 * good data.
883 * The READ idx is managed by the firmware once the card is enabled.
884 *
885 * The WRITE idx maps to the last position the driver has read from -- the
886 * position preceding WRITE is the last slot the firmware can place a packet.
887 *
888 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
889 * WRITE = READ.
890 *
891 * During initialization, the host sets up the READ queue position to the first
892 * IDX position, and WRITE to the last (READ - 1 wrapped)
893 *
894 * When the firmware places a packet in a buffer, it will advance the READ idx
895 * and fire the RX interrupt. The driver can then query the READ idx and
896 * process as many packets as possible, moving the WRITE idx forward as it
897 * resets the Rx queue buffers with new memory.
898 *
899 * The management in the driver is as follows:
900 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
901 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
902 * to replenish the iwl->rxq->rx_free.
903 * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
904 * iwl->rxq is replenished and the READ IDX is updated (updating the
905 * 'processed' and 'read' driver idxes as well)
906 * + A received packet is processed and handed to the kernel network stack,
907 * detached from the iwl->rxq. The driver 'processed' idx is updated.
908 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
909 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
910 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
911 * were enough free buffers and RX_STALLED is set it is cleared.
912 *
913 *
914 * Driver sequence:
915 *
916 * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
917 * il3945_rx_queue_restock
918 * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
919 * queue, updates firmware pointers, and updates
920 * the WRITE idx. If insufficient rx_free buffers
921 * are available, schedules il3945_rx_replenish
922 *
923 * -- enable interrupts --
924 * ISR - il3945_rx() Detach il_rx_bufs from pool up to the
925 * READ IDX, detaching the SKB from the pool.
926 * Moves the packet buffer from queue to rx_used.
927 * Calls il3945_rx_queue_restock to refill any empty
928 * slots.
929 * ...
930 *
931 */
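
/*
 * Illustrative sketch (not part of the original patch): free space in a
 * ring managed with the READ/WRITE idx convention described above can be
 * derived as below.  RX_QUEUE_SIZE and the two-slot guard that keeps
 * "full" distinguishable from "empty" are assumptions drawn from the
 * description, not a quote of the driver's own helper.
 */
static inline int example_rx_queue_space(u32 read, u32 write)
{
	int s = read - write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;	/* wrap around the circular buffer */
	s -= 2;				/* keep a gap so WRITE never catches READ */
	if (s < 0)
		s = 0;
	return s;
}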
932
933/**
934 * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
935 */
936static inline __le32
937il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
938{
939 return cpu_to_le32((u32) dma_addr);
940}
941
942/**
943 * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
944 *
945 * If there are slots in the RX queue that need to be restocked,
946 * and we have free pre-allocated buffers, fill the ranks as much
947 * as we can, pulling from rx_free.
948 *
949 * This moves the 'write' idx forward to catch up with 'processed', and
950 * also updates the memory address in the firmware to reference the new
951 * target buffer.
952 */
953static void
954il3945_rx_queue_restock(struct il_priv *il)
955{
956 struct il_rx_queue *rxq = &il->rxq;
957 struct list_head *element;
958 struct il_rx_buf *rxb;
959 unsigned long flags;
960 int write;
961
962 spin_lock_irqsave(&rxq->lock, flags);
963 write = rxq->write & ~0x7;
964 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
965 /* Get next free Rx buffer, remove from free list */
966 element = rxq->rx_free.next;
967 rxb = list_entry(element, struct il_rx_buf, list);
968 list_del(element);
969
970 /* Point to Rx buffer via next RBD in circular buffer */
971 rxq->bd[rxq->write] =
972 il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
973 rxq->queue[rxq->write] = rxb;
974 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
975 rxq->free_count--;
976 }
977 spin_unlock_irqrestore(&rxq->lock, flags);
978 /* If the pre-allocated buffer pool is dropping low, schedule to
979 * refill it */
980 if (rxq->free_count <= RX_LOW_WATERMARK)
981 queue_work(il->workqueue, &il->rx_replenish);
982
983 /* If we've added more space for the firmware to place data, tell it.
984 * Increment device's write pointer in multiples of 8. */
985 if (rxq->write_actual != (rxq->write & ~0x7) ||
986 abs(rxq->write - rxq->read) > 7) {
987 spin_lock_irqsave(&rxq->lock, flags);
988 rxq->need_update = 1;
989 spin_unlock_irqrestore(&rxq->lock, flags);
990 il_rx_queue_update_write_ptr(il, rxq);
991 }
992}
993
994/**
995 * il3945_rx_replenish - Move all used packet from rx_used to rx_free
996 *
997 * When moving to rx_free an SKB is allocated for the slot.
998 *
999 * Also restock the Rx queue via il3945_rx_queue_restock.
1000 * This is called as a scheduled work item (except for during initialization)
1001 */
1002static void
1003il3945_rx_allocate(struct il_priv *il, gfp_t priority)
1004{
1005 struct il_rx_queue *rxq = &il->rxq;
1006 struct list_head *element;
1007 struct il_rx_buf *rxb;
1008 struct page *page;
1009 dma_addr_t page_dma;
1010 unsigned long flags;
1011 gfp_t gfp_mask = priority;
1012
1013 while (1) {
1014 spin_lock_irqsave(&rxq->lock, flags);
1015 if (list_empty(&rxq->rx_used)) {
1016 spin_unlock_irqrestore(&rxq->lock, flags);
1017 return;
1018 }
1019 spin_unlock_irqrestore(&rxq->lock, flags);
1020
1021 if (rxq->free_count > RX_LOW_WATERMARK)
1022 gfp_mask |= __GFP_NOWARN;
1023
1024 if (il->hw_params.rx_page_order > 0)
1025 gfp_mask |= __GFP_COMP;
1026
1027 /* Alloc a new receive buffer */
1028 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
1029 if (!page) {
1030 if (net_ratelimit())
1031 D_INFO("Failed to allocate SKB buffer.\n");
1032 if (rxq->free_count <= RX_LOW_WATERMARK &&
1033 net_ratelimit())
1034 IL_ERR("Failed to allocate SKB buffer with %0x."
1035 "Only %u free buffers remaining.\n",
1036 priority, rxq->free_count);
1037 /* We don't reschedule replenish work here -- we will
1038 * call the restock method and if it still needs
1039 * more buffers it will schedule replenish */
1040 break;
1041 }
1042
1043 /* Get physical address of RB/SKB */
1044 page_dma =
1045 pci_map_page(il->pci_dev, page, 0,
1046 PAGE_SIZE << il->hw_params.rx_page_order,
1047 PCI_DMA_FROMDEVICE);
1048
1049 if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
1050 __free_pages(page, il->hw_params.rx_page_order);
1051 break;
1052 }
1053
1054 spin_lock_irqsave(&rxq->lock, flags);
1055
1056 if (list_empty(&rxq->rx_used)) {
1057 spin_unlock_irqrestore(&rxq->lock, flags);
1058 pci_unmap_page(il->pci_dev, page_dma,
1059 PAGE_SIZE << il->hw_params.rx_page_order,
1060 PCI_DMA_FROMDEVICE);
1061 __free_pages(page, il->hw_params.rx_page_order);
1062 return;
1063 }
1064
1065 element = rxq->rx_used.next;
1066 rxb = list_entry(element, struct il_rx_buf, list);
1067 list_del(element);
1068
1069 rxb->page = page;
1070 rxb->page_dma = page_dma;
1071 list_add_tail(&rxb->list, &rxq->rx_free);
1072 rxq->free_count++;
1073 il->alloc_rxb_page++;
1074
1075 spin_unlock_irqrestore(&rxq->lock, flags);
1076 }
1077}
1078
1079void
1080il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
1081{
1082 unsigned long flags;
1083 int i;
1084 spin_lock_irqsave(&rxq->lock, flags);
1085 INIT_LIST_HEAD(&rxq->rx_free);
1086 INIT_LIST_HEAD(&rxq->rx_used);
1087 /* Fill the rx_used queue with _all_ of the Rx buffers */
1088 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1089 /* In the reset function, these buffers may have been allocated
1090 * to an SKB, so we need to unmap and free potential storage */
1091 if (rxq->pool[i].page != NULL) {
1092 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1093 PAGE_SIZE << il->hw_params.rx_page_order,
1094 PCI_DMA_FROMDEVICE);
1095 __il_free_pages(il, rxq->pool[i].page);
1096 rxq->pool[i].page = NULL;
1097 }
1098 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1099 }
1100
1101 /* Set us so that we have processed and used all buffers, but have
1102 * not restocked the Rx queue with fresh buffers */
1103 rxq->read = rxq->write = 0;
1104 rxq->write_actual = 0;
1105 rxq->free_count = 0;
1106 spin_unlock_irqrestore(&rxq->lock, flags);
1107}
1108
1109void
1110il3945_rx_replenish(void *data)
1111{
1112 struct il_priv *il = data;
1113 unsigned long flags;
1114
1115 il3945_rx_allocate(il, GFP_KERNEL);
1116
1117 spin_lock_irqsave(&il->lock, flags);
1118 il3945_rx_queue_restock(il);
1119 spin_unlock_irqrestore(&il->lock, flags);
1120}
1121
1122static void
1123il3945_rx_replenish_now(struct il_priv *il)
1124{
1125 il3945_rx_allocate(il, GFP_ATOMIC);
1126
1127 il3945_rx_queue_restock(il);
1128}
1129
 1130/* Assumes that the page field of the buffers in 'pool' is kept accurate.
 1131 * If a page has been detached from a buffer, the pool entry must have its
 1132 * page pointer set to NULL.  This free routine walks the pool entries and,
 1133 * for each non-NULL page, unmaps and frees it.
 1134 */
1135static void
1136il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
1137{
1138 int i;
1139 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1140 if (rxq->pool[i].page != NULL) {
1141 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1142 PAGE_SIZE << il->hw_params.rx_page_order,
1143 PCI_DMA_FROMDEVICE);
1144 __il_free_pages(il, rxq->pool[i].page);
1145 rxq->pool[i].page = NULL;
1146 }
1147 }
1148
1149 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1150 rxq->bd_dma);
1151 dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
1152 rxq->rb_stts, rxq->rb_stts_dma);
1153 rxq->bd = NULL;
1154 rxq->rb_stts = NULL;
1155}
1156
1157/* Convert linear signal-to-noise ratio into dB */
1158static u8 ratio2dB[100] = {
1159/* 0 1 2 3 4 5 6 7 8 9 */
1160 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
1161 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
1162 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
1163 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
1164 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
1165 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
1166 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
1167 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
1168 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
1169 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
1170};
1171
1172/* Calculates a relative dB value from a ratio of linear
1173 * (i.e. not dB) signal levels.
1174 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
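/* Example: a linear voltage ratio of 250:1 falls into the 100:1+ branch below,
 * giving 20 + ratio2dB[25] = 20 + 28 = 48 dB, close to 20*log10(250) ~= 47.96 dB. */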
1175int
1176il3945_calc_db_from_ratio(int sig_ratio)
1177{
1178 /* 1000:1 or higher just report as 60 dB */
1179 if (sig_ratio >= 1000)
1180 return 60;
1181
1182 /* 100:1 or higher, divide by 10 and use table,
1183 * add 20 dB to make up for divide by 10 */
1184 if (sig_ratio >= 100)
1185 return 20 + (int)ratio2dB[sig_ratio / 10];
1186
1187 /* We shouldn't see this */
1188 if (sig_ratio < 1)
1189 return 0;
1190
1191 /* Use table for ratios 1:1 - 99:1 */
1192 return (int)ratio2dB[sig_ratio];
1193}
1194
1195/**
1196 * il3945_rx_handle - Main entry function for receiving responses from uCode
1197 *
1198 * Uses the il->handlers callback function array to invoke
1199 * the appropriate handlers, including command responses,
1200 * frame-received notifications, and other notifications.
1201 */
1202static void
1203il3945_rx_handle(struct il_priv *il)
1204{
1205 struct il_rx_buf *rxb;
1206 struct il_rx_pkt *pkt;
1207 struct il_rx_queue *rxq = &il->rxq;
1208 u32 r, i;
1209 int reclaim;
1210 unsigned long flags;
1211 u8 fill_rx = 0;
1212 u32 count = 8;
1213 int total_empty = 0;
1214
1215 /* uCode's read idx (stored in shared DRAM) indicates the last Rx
1216 * buffer that the driver may process (last buffer filled by ucode). */
1217 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1218 i = rxq->read;
1219
 1220	/* calculate total frames that need to be restocked after handling RX */
1221 total_empty = r - rxq->write_actual;
1222 if (total_empty < 0)
1223 total_empty += RX_QUEUE_SIZE;
1224
1225 if (total_empty > (RX_QUEUE_SIZE / 2))
1226 fill_rx = 1;
1227 /* Rx interrupt, but nothing sent from uCode */
1228 if (i == r)
1229 D_RX("r = %d, i = %d\n", r, i);
1230
1231 while (i != r) {
1232 int len;
1233
1234 rxb = rxq->queue[i];
1235
1236 /* If an RXB doesn't have a Rx queue slot associated with it,
1237 * then a bug has been introduced in the queue refilling
1238 * routines -- catch it here */
1239 BUG_ON(rxb == NULL);
1240
1241 rxq->queue[i] = NULL;
1242
1243 pci_unmap_page(il->pci_dev, rxb->page_dma,
1244 PAGE_SIZE << il->hw_params.rx_page_order,
1245 PCI_DMA_FROMDEVICE);
1246 pkt = rxb_addr(rxb);
1247
1248 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
1249 len += sizeof(u32); /* account for status word */
1250
1251 reclaim = il_need_reclaim(il, pkt);
1252
1253 /* Based on type of command response or notification,
1254 * handle those that need handling via function in
1255 * handlers table. See il3945_setup_handlers() */
1256 if (il->handlers[pkt->hdr.cmd]) {
1257 D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
1258 il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1259 il->isr_stats.handlers[pkt->hdr.cmd]++;
1260 il->handlers[pkt->hdr.cmd] (il, rxb);
1261 } else {
1262 /* No handling needed */
1263 D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
1264 i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1265 }
1266
1267 /*
1268 * XXX: After here, we should always check rxb->page
1269 * against NULL before touching it or its virtual
1270 * memory (pkt). Because some handler might have
1271 * already taken or freed the pages.
1272 */
1273
1274 if (reclaim) {
1275 /* Invoke any callbacks, transfer the buffer to caller,
1276 * and fire off the (possibly) blocking il_send_cmd()
1277 * as we reclaim the driver command queue */
1278 if (rxb->page)
1279 il_tx_cmd_complete(il, rxb);
1280 else
1281 IL_WARN("Claim null rxb?\n");
1282 }
1283
1284 /* Reuse the page if possible. For notification packets and
1285 * SKBs that fail to Rx correctly, add them back into the
1286 * rx_free list for reuse later. */
1287 spin_lock_irqsave(&rxq->lock, flags);
1288 if (rxb->page != NULL) {
1289 rxb->page_dma =
1290 pci_map_page(il->pci_dev, rxb->page, 0,
1291 PAGE_SIZE << il->hw_params.
1292 rx_page_order, PCI_DMA_FROMDEVICE);
1293 if (unlikely(pci_dma_mapping_error(il->pci_dev,
1294 rxb->page_dma))) {
1295 __il_free_pages(il, rxb->page);
1296 rxb->page = NULL;
1297 list_add_tail(&rxb->list, &rxq->rx_used);
1298 } else {
1299 list_add_tail(&rxb->list, &rxq->rx_free);
1300 rxq->free_count++;
1301 }
1302 } else
1303 list_add_tail(&rxb->list, &rxq->rx_used);
1304
1305 spin_unlock_irqrestore(&rxq->lock, flags);
1306
1307 i = (i + 1) & RX_QUEUE_MASK;
1308 /* If there are a lot of unused frames,
1309 * restock the Rx queue so ucode won't assert. */
1310 if (fill_rx) {
1311 count++;
1312 if (count >= 8) {
1313 rxq->read = i;
1314 il3945_rx_replenish_now(il);
1315 count = 0;
1316 }
1317 }
1318 }
1319
1320 /* Backtrack one entry */
1321 rxq->read = i;
1322 if (fill_rx)
1323 il3945_rx_replenish_now(il);
1324 else
1325 il3945_rx_queue_restock(il);
1326}
1327
1328/* call this function to flush any scheduled tasklet */
1329static inline void
1330il3945_synchronize_irq(struct il_priv *il)
1331{
1332 /* wait to make sure we flush pending tasklet */
1333 synchronize_irq(il->pci_dev->irq);
1334 tasklet_kill(&il->irq_tasklet);
1335}
1336
1337static const char *
1338il3945_desc_lookup(int i)
1339{
1340 switch (i) {
1341 case 1:
1342 return "FAIL";
1343 case 2:
1344 return "BAD_PARAM";
1345 case 3:
1346 return "BAD_CHECKSUM";
1347 case 4:
1348 return "NMI_INTERRUPT";
1349 case 5:
1350 return "SYSASSERT";
1351 case 6:
1352 return "FATAL_ERROR";
1353 }
1354
1355 return "UNKNOWN";
1356}
1357
1358#define ERROR_START_OFFSET (1 * sizeof(u32))
1359#define ERROR_ELEM_SIZE (7 * sizeof(u32))
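/* The on-device error log at 'base' begins with a one-word entry count
 * (hence ERROR_START_OFFSET), followed by 'count' records of seven words
 * each: desc, time, blink1, blink2, ilink1, ilink2, data1. */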
1360
1361void
1362il3945_dump_nic_error_log(struct il_priv *il)
1363{
1364 u32 i;
1365 u32 desc, time, count, base, data1;
1366 u32 blink1, blink2, ilink1, ilink2;
1367
1368 base = le32_to_cpu(il->card_alive.error_event_table_ptr);
1369
1370 if (!il3945_hw_valid_rtc_data_addr(base)) {
1371 IL_ERR("Not valid error log pointer 0x%08X\n", base);
1372 return;
1373 }
1374
1375 count = il_read_targ_mem(il, base);
1376
1377 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1378 IL_ERR("Start IWL Error Log Dump:\n");
1379 IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
1380 }
1381
1382 IL_ERR("Desc Time asrtPC blink2 "
1383 "ilink1 nmiPC Line\n");
1384 for (i = ERROR_START_OFFSET;
1385 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1386 i += ERROR_ELEM_SIZE) {
1387 desc = il_read_targ_mem(il, base + i);
1388 time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
1389 blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
1390 blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
1391 ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
1392 ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
1393 data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));
1394
1395 IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1396 il3945_desc_lookup(desc), desc, time, blink1, blink2,
1397 ilink1, ilink2, data1);
1398 }
1399}
1400
1401static void
1402il3945_irq_tasklet(struct il_priv *il)
1403{
1404 u32 inta, handled = 0;
1405 u32 inta_fh;
1406 unsigned long flags;
1407#ifdef CONFIG_IWLEGACY_DEBUG
1408 u32 inta_mask;
1409#endif
1410
1411 spin_lock_irqsave(&il->lock, flags);
1412
1413 /* Ack/clear/reset pending uCode interrupts.
1414 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1415 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
1416 inta = _il_rd(il, CSR_INT);
1417 _il_wr(il, CSR_INT, inta);
1418
1419 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
1420 * Any new interrupts that happen after this, either while we're
1421 * in this tasklet, or later, will show up in next ISR/tasklet. */
1422 inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
1423 _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
1424
1425#ifdef CONFIG_IWLEGACY_DEBUG
1426 if (il_get_debug_level(il) & IL_DL_ISR) {
1427 /* just for debug */
1428 inta_mask = _il_rd(il, CSR_INT_MASK);
1429 D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
1430 inta_mask, inta_fh);
1431 }
1432#endif
1433
1434 spin_unlock_irqrestore(&il->lock, flags);
1435
1436 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1437 * atomic, make sure that inta covers all the interrupts that
1438 * we've discovered, even if FH interrupt came in just after
1439 * reading CSR_INT. */
1440 if (inta_fh & CSR39_FH_INT_RX_MASK)
1441 inta |= CSR_INT_BIT_FH_RX;
1442 if (inta_fh & CSR39_FH_INT_TX_MASK)
1443 inta |= CSR_INT_BIT_FH_TX;
1444
1445 /* Now service all interrupt bits discovered above. */
1446 if (inta & CSR_INT_BIT_HW_ERR) {
1447 IL_ERR("Hardware error detected. Restarting.\n");
1448
1449 /* Tell the device to stop sending interrupts */
1450 il_disable_interrupts(il);
1451
1452 il->isr_stats.hw++;
1453 il_irq_handle_error(il);
1454
1455 handled |= CSR_INT_BIT_HW_ERR;
1456
1457 return;
1458 }
1459#ifdef CONFIG_IWLEGACY_DEBUG
1460 if (il_get_debug_level(il) & (IL_DL_ISR)) {
1461 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1462 if (inta & CSR_INT_BIT_SCD) {
 1463			D_ISR("Scheduler finished transmitting "
 1464			      "the frame(s).\n");
1465 il->isr_stats.sch++;
1466 }
1467
1468 /* Alive notification via Rx interrupt will do the real work */
1469 if (inta & CSR_INT_BIT_ALIVE) {
1470 D_ISR("Alive interrupt\n");
1471 il->isr_stats.alive++;
1472 }
1473 }
1474#endif
1475 /* Safely ignore these bits for debug checks below */
1476 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1477
1478 /* Error detected by uCode */
1479 if (inta & CSR_INT_BIT_SW_ERR) {
1480 IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
1481 inta);
1482 il->isr_stats.sw++;
1483 il_irq_handle_error(il);
1484 handled |= CSR_INT_BIT_SW_ERR;
1485 }
1486
1487 /* uCode wakes up after power-down sleep */
1488 if (inta & CSR_INT_BIT_WAKEUP) {
1489 D_ISR("Wakeup interrupt\n");
1490 il_rx_queue_update_write_ptr(il, &il->rxq);
1491
1492 spin_lock_irqsave(&il->lock, flags);
1493 il_txq_update_write_ptr(il, &il->txq[0]);
1494 il_txq_update_write_ptr(il, &il->txq[1]);
1495 il_txq_update_write_ptr(il, &il->txq[2]);
1496 il_txq_update_write_ptr(il, &il->txq[3]);
1497 il_txq_update_write_ptr(il, &il->txq[4]);
1498 spin_unlock_irqrestore(&il->lock, flags);
1499
1500 il->isr_stats.wakeup++;
1501 handled |= CSR_INT_BIT_WAKEUP;
1502 }
1503
1504 /* All uCode command responses, including Tx command responses,
1505 * Rx "responses" (frame-received notification), and other
 1506 * notifications from uCode come through here. */
1507 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1508 il3945_rx_handle(il);
1509 il->isr_stats.rx++;
1510 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1511 }
1512
1513 if (inta & CSR_INT_BIT_FH_TX) {
1514 D_ISR("Tx interrupt\n");
1515 il->isr_stats.tx++;
1516
1517 _il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
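		/* Bit 6 of CSR_FH_INT_STATUS is the Tx-done bit for the service
		 * DMA channel (FH39_SRVC_CHNL), whose credit counter is reset
		 * on the next line. */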
1518 il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
1519 handled |= CSR_INT_BIT_FH_TX;
1520 }
1521
1522 if (inta & ~handled) {
1523 IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1524 il->isr_stats.unhandled++;
1525 }
1526
1527 if (inta & ~il->inta_mask) {
1528 IL_WARN("Disabled INTA bits 0x%08x were pending\n",
1529 inta & ~il->inta_mask);
1530 IL_WARN(" with inta_fh = 0x%08x\n", inta_fh);
1531 }
1532
1533 /* Re-enable all interrupts */
1534 /* only Re-enable if disabled by irq */
1535 if (test_bit(S_INT_ENABLED, &il->status))
1536 il_enable_interrupts(il);
1537
1538#ifdef CONFIG_IWLEGACY_DEBUG
1539 if (il_get_debug_level(il) & (IL_DL_ISR)) {
1540 inta = _il_rd(il, CSR_INT);
1541 inta_mask = _il_rd(il, CSR_INT_MASK);
1542 inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
1543 D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1544 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1545 }
1546#endif
1547}
1548
1549static int
1550il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
1551 u8 is_active, u8 n_probes,
1552 struct il3945_scan_channel *scan_ch,
1553 struct ieee80211_vif *vif)
1554{
1555 struct ieee80211_channel *chan;
1556 const struct ieee80211_supported_band *sband;
1557 const struct il_channel_info *ch_info;
1558 u16 passive_dwell = 0;
1559 u16 active_dwell = 0;
1560 int added, i;
1561
1562 sband = il_get_hw_mode(il, band);
1563 if (!sband)
1564 return 0;
1565
1566 active_dwell = il_get_active_dwell_time(il, band, n_probes);
1567 passive_dwell = il_get_passive_dwell_time(il, band, vif);
1568
1569 if (passive_dwell <= active_dwell)
1570 passive_dwell = active_dwell + 1;
1571
1572 for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
1573 chan = il->scan_request->channels[i];
1574
1575 if (chan->band != band)
1576 continue;
1577
1578 scan_ch->channel = chan->hw_value;
1579
1580 ch_info = il_get_channel_info(il, band, scan_ch->channel);
1581 if (!il_is_channel_valid(ch_info)) {
1582 D_SCAN("Channel %d is INVALID for this band.\n",
1583 scan_ch->channel);
1584 continue;
1585 }
1586
1587 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1588 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
 1589		/* If passive, set up for auto-switch
1590 * and use long active_dwell time.
1591 */
1592 if (!is_active || il_is_channel_passive(ch_info) ||
1593 (chan->flags & IEEE80211_CHAN_NO_IR)) {
1594 scan_ch->type = 0; /* passive */
1595 if (IL_UCODE_API(il->ucode_ver) == 1)
1596 scan_ch->active_dwell =
1597 cpu_to_le16(passive_dwell - 1);
1598 } else {
1599 scan_ch->type = 1; /* active */
1600 }
1601
 1602		/* Set direct probe bits. These may be used both for active
 1603		 * scan channels (probes get sent right away),
 1604		 * or for passive channels (probes get sent only after
 1605		 * hearing a clear Rx packet). */
1606 if (IL_UCODE_API(il->ucode_ver) >= 2) {
1607 if (n_probes)
1608 scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
1609 } else {
1610 /* uCode v1 does not allow setting direct probe bits on
1611 * passive channel. */
1612 if ((scan_ch->type & 1) && n_probes)
1613 scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
1614 }
1615
1616 /* Set txpower levels to defaults */
1617 scan_ch->tpc.dsp_atten = 110;
1618 /* scan_pwr_info->tpc.dsp_atten; */
1619
1620 /*scan_pwr_info->tpc.tx_gain; */
1621 if (band == IEEE80211_BAND_5GHZ)
1622 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
1623 else {
1624 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
1625 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1626 * power level:
1627 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
1628 */
1629 }
1630
1631 D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
1632 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
1633 (scan_ch->type & 1) ? active_dwell : passive_dwell);
1634
1635 scan_ch++;
1636 added++;
1637 }
1638
1639 D_SCAN("total channels to scan %d\n", added);
1640 return added;
1641}
1642
1643static void
1644il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
1645{
1646 int i;
1647
1648 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
1649 rates[i].bitrate = il3945_rates[i].ieee * 5;
1650 rates[i].hw_value = i; /* Rate scaling will work on idxes */
1651 rates[i].hw_value_short = i;
1652 rates[i].flags = 0;
1653 if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
1654 /*
1655 * If CCK != 1M then set short preamble rate flag.
1656 */
1657 rates[i].flags |=
1658 (il3945_rates[i].plcp ==
1659 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1660 }
1661 }
1662}
1663
1664/******************************************************************************
1665 *
1666 * uCode download functions
1667 *
1668 ******************************************************************************/
1669
1670static void
1671il3945_dealloc_ucode_pci(struct il_priv *il)
1672{
1673 il_free_fw_desc(il->pci_dev, &il->ucode_code);
1674 il_free_fw_desc(il->pci_dev, &il->ucode_data);
1675 il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
1676 il_free_fw_desc(il->pci_dev, &il->ucode_init);
1677 il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
1678 il_free_fw_desc(il->pci_dev, &il->ucode_boot);
1679}
1680
1681/**
1682 * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
1683 * looking at all data.
1684 */
1685static int
1686il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
1687{
1688 u32 val;
1689 u32 save_len = len;
1690 int rc = 0;
1691 u32 errcnt;
1692
1693 D_INFO("ucode inst image size is %u\n", len);
1694
1695 il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
1696
1697 errcnt = 0;
1698 for (; len > 0; len -= sizeof(u32), image++) {
1699 /* read data comes through single port, auto-incr addr */
1700 /* NOTE: Use the debugless read so we don't flood kernel log
1701 * if IL_DL_IO is set */
1702 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1703 if (val != le32_to_cpu(*image)) {
1704 IL_ERR("uCode INST section is invalid at "
1705 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1706 save_len - len, val, le32_to_cpu(*image));
1707 rc = -EIO;
1708 errcnt++;
1709 if (errcnt >= 20)
1710 break;
1711 }
1712 }
1713
1714 if (!errcnt)
1715 D_INFO("ucode image in INSTRUCTION memory is good\n");
1716
1717 return rc;
1718}
1719
1720/**
1721 * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
1722 * using sample data 100 bytes apart. If these sample points are good,
1723 * it's a pretty good bet that everything between them is good, too.
1724 */
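/* Samples one 32-bit word every 100 bytes of the image and gives up after
 * three mismatches; il3945_verify_inst_full() checks every word instead. */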
1725static int
1726il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
1727{
1728 u32 val;
1729 int rc = 0;
1730 u32 errcnt = 0;
1731 u32 i;
1732
1733 D_INFO("ucode inst image size is %u\n", len);
1734
1735 for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
1736 /* read data comes through single port, auto-incr addr */
1737 /* NOTE: Use the debugless read so we don't flood kernel log
1738 * if IL_DL_IO is set */
1739 il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
1740 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1741 if (val != le32_to_cpu(*image)) {
1742#if 0 /* Enable this if you want to see details */
1743 IL_ERR("uCode INST section is invalid at "
1744 "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
1745 *image);
1746#endif
1747 rc = -EIO;
1748 errcnt++;
1749 if (errcnt >= 3)
1750 break;
1751 }
1752 }
1753
1754 return rc;
1755}
1756
1757/**
1758 * il3945_verify_ucode - determine which instruction image is in SRAM,
1759 * and verify its contents
1760 */
1761static int
1762il3945_verify_ucode(struct il_priv *il)
1763{
1764 __le32 *image;
1765 u32 len;
1766 int rc = 0;
1767
1768 /* Try bootstrap */
1769 image = (__le32 *) il->ucode_boot.v_addr;
1770 len = il->ucode_boot.len;
1771 rc = il3945_verify_inst_sparse(il, image, len);
1772 if (rc == 0) {
1773 D_INFO("Bootstrap uCode is good in inst SRAM\n");
1774 return 0;
1775 }
1776
1777 /* Try initialize */
1778 image = (__le32 *) il->ucode_init.v_addr;
1779 len = il->ucode_init.len;
1780 rc = il3945_verify_inst_sparse(il, image, len);
1781 if (rc == 0) {
1782 D_INFO("Initialize uCode is good in inst SRAM\n");
1783 return 0;
1784 }
1785
1786 /* Try runtime/protocol */
1787 image = (__le32 *) il->ucode_code.v_addr;
1788 len = il->ucode_code.len;
1789 rc = il3945_verify_inst_sparse(il, image, len);
1790 if (rc == 0) {
1791 D_INFO("Runtime uCode is good in inst SRAM\n");
1792 return 0;
1793 }
1794
1795 IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1796
1797 /* Since nothing seems to match, show first several data entries in
1798 * instruction SRAM, so maybe visual inspection will give a clue.
1799 * Selection of bootstrap image (vs. other images) is arbitrary. */
1800 image = (__le32 *) il->ucode_boot.v_addr;
1801 len = il->ucode_boot.len;
1802 rc = il3945_verify_inst_full(il, image, len);
1803
1804 return rc;
1805}
1806
1807static void
1808il3945_nic_start(struct il_priv *il)
1809{
1810 /* Remove all resets to allow NIC to operate */
1811 _il_wr(il, CSR_RESET, 0);
1812}
1813
1814#define IL3945_UCODE_GET(item) \
1815static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
1816{ \
1817 return le32_to_cpu(ucode->v1.item); \
1818}
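/* Each IL3945_UCODE_GET(x) expands to an accessor il3945_ucode_get_x() that
 * returns the corresponding __le32 field of the v1 header in CPU byte order;
 * it is instantiated below for the five image-size fields. */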
1819
1820static u32
1821il3945_ucode_get_header_size(u32 api_ver)
1822{
1823 return 24;
1824}
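/* The v1 header is six __le32 words (ver plus the five size fields),
 * hence a fixed 24 bytes regardless of api_ver. */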
1825
1826static u8 *
1827il3945_ucode_get_data(const struct il_ucode_header *ucode)
1828{
1829 return (u8 *) ucode->v1.data;
1830}
1831
1832IL3945_UCODE_GET(inst_size);
1833IL3945_UCODE_GET(data_size);
1834IL3945_UCODE_GET(init_size);
1835IL3945_UCODE_GET(init_data_size);
1836IL3945_UCODE_GET(boot_size);
1837
1838/**
1839 * il3945_read_ucode - Read uCode images from disk file.
1840 *
1841 * Copy into buffers for card to fetch via bus-mastering
1842 */
1843static int
1844il3945_read_ucode(struct il_priv *il)
1845{
1846 const struct il_ucode_header *ucode;
1847 int ret = -EINVAL, idx;
1848 const struct firmware *ucode_raw;
1849 /* firmware file name contains uCode/driver compatibility version */
1850 const char *name_pre = il->cfg->fw_name_pre;
1851 const unsigned int api_max = il->cfg->ucode_api_max;
1852 const unsigned int api_min = il->cfg->ucode_api_min;
1853 char buf[25];
1854 u8 *src;
1855 size_t len;
1856 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1857
1858 /* Ask kernel firmware_class module to get the boot firmware off disk.
1859 * request_firmware() is synchronous, file is in memory on return. */
1860 for (idx = api_max; idx >= api_min; idx--) {
1861 sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
1862 ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
1863 if (ret < 0) {
1864 IL_ERR("%s firmware file req failed: %d\n", buf, ret);
1865 if (ret == -ENOENT)
1866 continue;
1867 else
1868 goto error;
1869 } else {
1870 if (idx < api_max)
1871 IL_ERR("Loaded firmware %s, "
1872 "which is deprecated. "
1873 " Please use API v%u instead.\n", buf,
1874 api_max);
1875 D_INFO("Got firmware '%s' file "
1876 "(%zd bytes) from disk\n", buf, ucode_raw->size);
1877 break;
1878 }
1879 }
1880
1881 if (ret < 0)
1882 goto error;
1883
1884 /* Make sure that we got at least our header! */
1885 if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
1886 IL_ERR("File size way too small!\n");
1887 ret = -EINVAL;
1888 goto err_release;
1889 }
1890
1891 /* Data from ucode file: header followed by uCode images */
1892 ucode = (struct il_ucode_header *)ucode_raw->data;
1893
1894 il->ucode_ver = le32_to_cpu(ucode->ver);
1895 api_ver = IL_UCODE_API(il->ucode_ver);
1896 inst_size = il3945_ucode_get_inst_size(ucode);
1897 data_size = il3945_ucode_get_data_size(ucode);
1898 init_size = il3945_ucode_get_init_size(ucode);
1899 init_data_size = il3945_ucode_get_init_data_size(ucode);
1900 boot_size = il3945_ucode_get_boot_size(ucode);
1901 src = il3945_ucode_get_data(ucode);
1902
1903 /* api_ver should match the api version forming part of the
1904 * firmware filename ... but we don't check for that and only rely
1905 * on the API version read from firmware header from here on forward */
1906
1907 if (api_ver < api_min || api_ver > api_max) {
1908 IL_ERR("Driver unable to support your firmware API. "
1909 "Driver supports v%u, firmware is v%u.\n", api_max,
1910 api_ver);
1911 il->ucode_ver = 0;
1912 ret = -EINVAL;
1913 goto err_release;
1914 }
1915 if (api_ver != api_max)
1916 IL_ERR("Firmware has old API version. Expected %u, "
1917 "got %u. New firmware can be obtained "
1918 "from http://www.intellinuxwireless.org.\n", api_max,
1919 api_ver);
1920
1921 IL_INFO("loaded firmware version %u.%u.%u.%u\n",
1922 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
1923 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
1924
1925 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
1926 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
1927 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
1928 IL_UCODE_SERIAL(il->ucode_ver));
1929
1930 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
1931 D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
1932 D_INFO("f/w package hdr runtime data size = %u\n", data_size);
1933 D_INFO("f/w package hdr init inst size = %u\n", init_size);
1934 D_INFO("f/w package hdr init data size = %u\n", init_data_size);
1935 D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
1936
1937 /* Verify size of file vs. image size info in file's header */
1938 if (ucode_raw->size !=
1939 il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
1940 init_size + init_data_size + boot_size) {
1941
1942 D_INFO("uCode file size %zd does not match expected size\n",
1943 ucode_raw->size);
1944 ret = -EINVAL;
1945 goto err_release;
1946 }
1947
1948 /* Verify that uCode images will fit in card's SRAM */
1949 if (inst_size > IL39_MAX_INST_SIZE) {
1950 D_INFO("uCode instr len %d too large to fit in\n", inst_size);
1951 ret = -EINVAL;
1952 goto err_release;
1953 }
1954
1955 if (data_size > IL39_MAX_DATA_SIZE) {
1956 D_INFO("uCode data len %d too large to fit in\n", data_size);
1957 ret = -EINVAL;
1958 goto err_release;
1959 }
1960 if (init_size > IL39_MAX_INST_SIZE) {
1961 D_INFO("uCode init instr len %d too large to fit in\n",
1962 init_size);
1963 ret = -EINVAL;
1964 goto err_release;
1965 }
1966 if (init_data_size > IL39_MAX_DATA_SIZE) {
1967 D_INFO("uCode init data len %d too large to fit in\n",
1968 init_data_size);
1969 ret = -EINVAL;
1970 goto err_release;
1971 }
1972 if (boot_size > IL39_MAX_BSM_SIZE) {
1973 D_INFO("uCode boot instr len %d too large to fit in\n",
1974 boot_size);
1975 ret = -EINVAL;
1976 goto err_release;
1977 }
1978
1979 /* Allocate ucode buffers for card's bus-master loading ... */
1980
1981 /* Runtime instructions and 2 copies of data:
1982 * 1) unmodified from disk
1983 * 2) backup cache for save/restore during power-downs */
1984 il->ucode_code.len = inst_size;
1985 il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
1986
1987 il->ucode_data.len = data_size;
1988 il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
1989
1990 il->ucode_data_backup.len = data_size;
1991 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
1992
1993 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
1994 !il->ucode_data_backup.v_addr)
1995 goto err_pci_alloc;
1996
1997 /* Initialization instructions and data */
1998 if (init_size && init_data_size) {
1999 il->ucode_init.len = init_size;
2000 il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
2001
2002 il->ucode_init_data.len = init_data_size;
2003 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
2004
2005 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
2006 goto err_pci_alloc;
2007 }
2008
2009 /* Bootstrap (instructions only, no data) */
2010 if (boot_size) {
2011 il->ucode_boot.len = boot_size;
2012 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
2013
2014 if (!il->ucode_boot.v_addr)
2015 goto err_pci_alloc;
2016 }
2017
2018 /* Copy images into buffers for card's bus-master reads ... */
2019
2020 /* Runtime instructions (first block of data in file) */
2021 len = inst_size;
2022 D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
2023 memcpy(il->ucode_code.v_addr, src, len);
2024 src += len;
2025
2026 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2027 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
2028
 2029	/* Runtime data (2nd block)
 2030	 * NOTE: the backup buffer is copied again in __il3945_up() */
2031 len = data_size;
2032 D_INFO("Copying (but not loading) uCode data len %zd\n", len);
2033 memcpy(il->ucode_data.v_addr, src, len);
2034 memcpy(il->ucode_data_backup.v_addr, src, len);
2035 src += len;
2036
2037 /* Initialization instructions (3rd block) */
2038 if (init_size) {
2039 len = init_size;
2040 D_INFO("Copying (but not loading) init instr len %zd\n", len);
2041 memcpy(il->ucode_init.v_addr, src, len);
2042 src += len;
2043 }
2044
2045 /* Initialization data (4th block) */
2046 if (init_data_size) {
2047 len = init_data_size;
2048 D_INFO("Copying (but not loading) init data len %zd\n", len);
2049 memcpy(il->ucode_init_data.v_addr, src, len);
2050 src += len;
2051 }
2052
2053 /* Bootstrap instructions (5th block) */
2054 len = boot_size;
2055 D_INFO("Copying (but not loading) boot instr len %zd\n", len);
2056 memcpy(il->ucode_boot.v_addr, src, len);
2057
2058 /* We have our copies now, allow OS release its copies */
2059 release_firmware(ucode_raw);
2060 return 0;
2061
2062err_pci_alloc:
2063 IL_ERR("failed to allocate pci memory\n");
2064 ret = -ENOMEM;
2065 il3945_dealloc_ucode_pci(il);
2066
2067err_release:
2068 release_firmware(ucode_raw);
2069
2070error:
2071 return ret;
2072}
2073
2074/**
2075 * il3945_set_ucode_ptrs - Set uCode address location
2076 *
2077 * Tell initialization uCode where to find runtime uCode.
2078 *
2079 * BSM registers initially contain pointers to initialization uCode.
2080 * We need to replace them to load runtime uCode inst and data,
2081 * and to save runtime data when powering down.
2082 */
2083static int
2084il3945_set_ucode_ptrs(struct il_priv *il)
2085{
2086 dma_addr_t pinst;
2087 dma_addr_t pdata;
2088
2089 /* bits 31:0 for 3945 */
2090 pinst = il->ucode_code.p_addr;
2091 pdata = il->ucode_data_backup.p_addr;
2092
2093 /* Tell bootstrap uCode where to find image to load */
2094 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
2095 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
2096 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
2097
2098 /* Inst byte count must be last to set up, bit 31 signals uCode
2099 * that all new ptr/size info is in place */
2100 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
2101 il->ucode_code.len | BSM_DRAM_INST_LOAD);
2102
2103 D_INFO("Runtime uCode pointers are set.\n");
2104
2105 return 0;
2106}
2107
2108/**
2109 * il3945_init_alive_start - Called after N_ALIVE notification received
2110 *
2111 * Called after N_ALIVE notification received from "initialize" uCode.
2112 *
2113 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2114 */
2115static void
2116il3945_init_alive_start(struct il_priv *il)
2117{
2118 /* Check alive response for "valid" sign from uCode */
2119 if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
2120 /* We had an error bringing up the hardware, so take it
2121 * all the way back down so we can try again */
2122 D_INFO("Initialize Alive failed.\n");
2123 goto restart;
2124 }
2125
2126 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2127 * This is a paranoid check, because we would not have gotten the
2128 * "initialize" alive if code weren't properly loaded. */
2129 if (il3945_verify_ucode(il)) {
2130 /* Runtime instruction load was bad;
2131 * take it all the way back down so we can try again */
2132 D_INFO("Bad \"initialize\" uCode load.\n");
2133 goto restart;
2134 }
2135
2136 /* Send pointers to protocol/runtime uCode image ... init code will
2137 * load and launch runtime uCode, which will send us another "Alive"
2138 * notification. */
2139 D_INFO("Initialization Alive received.\n");
2140 if (il3945_set_ucode_ptrs(il)) {
2141 /* Runtime instruction load won't happen;
2142 * take it all the way back down so we can try again */
2143 D_INFO("Couldn't set up uCode pointers.\n");
2144 goto restart;
2145 }
2146 return;
2147
2148restart:
2149 queue_work(il->workqueue, &il->restart);
2150}
2151
2152/**
2153 * il3945_alive_start - called after N_ALIVE notification received
2154 * from protocol/runtime uCode (initialization uCode's
2155 * Alive gets handled by il3945_init_alive_start()).
2156 */
2157static void
2158il3945_alive_start(struct il_priv *il)
2159{
2160 int thermal_spin = 0;
2161 u32 rfkill;
2162
2163 D_INFO("Runtime Alive received.\n");
2164
2165 if (il->card_alive.is_valid != UCODE_VALID_OK) {
2166 /* We had an error bringing up the hardware, so take it
2167 * all the way back down so we can try again */
2168 D_INFO("Alive failed.\n");
2169 goto restart;
2170 }
2171
2172 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2173 * This is a paranoid check, because we would not have gotten the
2174 * "runtime" alive if code weren't properly loaded. */
2175 if (il3945_verify_ucode(il)) {
2176 /* Runtime instruction load was bad;
2177 * take it all the way back down so we can try again */
2178 D_INFO("Bad runtime uCode load.\n");
2179 goto restart;
2180 }
2181
2182 rfkill = il_rd_prph(il, APMG_RFKILL_REG);
2183 D_INFO("RFKILL status: 0x%x\n", rfkill);
2184
2185 if (rfkill & 0x1) {
2186 clear_bit(S_RFKILL, &il->status);
2187 /* if RFKILL is not on, then wait for thermal
2188 * sensor in adapter to kick in */
2189 while (il3945_hw_get_temperature(il) == 0) {
2190 thermal_spin++;
2191 udelay(10);
2192 }
2193
2194 if (thermal_spin)
2195 D_INFO("Thermal calibration took %dus\n",
2196 thermal_spin * 10);
2197 } else
2198 set_bit(S_RFKILL, &il->status);
2199
2200 /* After the ALIVE response, we can send commands to 3945 uCode */
2201 set_bit(S_ALIVE, &il->status);
2202
2203 /* Enable watchdog to monitor the driver tx queues */
2204 il_setup_watchdog(il);
2205
2206 if (il_is_rfkill(il))
2207 return;
2208
2209 ieee80211_wake_queues(il->hw);
2210
2211 il->active_rate = RATES_MASK_3945;
2212
2213 il_power_update_mode(il, true);
2214
2215 if (il_is_associated(il)) {
2216 struct il3945_rxon_cmd *active_rxon =
2217 (struct il3945_rxon_cmd *)(&il->active);
2218
2219 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2220 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2221 } else {
2222 /* Initialize our rx_config data */
2223 il_connection_init_rx_config(il);
2224 }
2225
2226 /* Configure Bluetooth device coexistence support */
2227 il_send_bt_config(il);
2228
2229 set_bit(S_READY, &il->status);
2230
2231 /* Configure the adapter for unassociated operation */
2232 il3945_commit_rxon(il);
2233
2234 il3945_reg_txpower_periodic(il);
2235
2236 D_INFO("ALIVE processing complete.\n");
2237 wake_up(&il->wait_command_queue);
2238
2239 return;
2240
2241restart:
2242 queue_work(il->workqueue, &il->restart);
2243}
2244
2245static void il3945_cancel_deferred_work(struct il_priv *il);
2246
2247static void
2248__il3945_down(struct il_priv *il)
2249{
2250 unsigned long flags;
2251 int exit_pending;
2252
2253 D_INFO(DRV_NAME " is going down\n");
2254
2255 il_scan_cancel_timeout(il, 200);
2256
2257 exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
2258
2259 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
 2260	 * to prevent the timer from being rearmed */
2261 del_timer_sync(&il->watchdog);
2262
2263 /* Station information will now be cleared in device */
2264 il_clear_ucode_stations(il);
2265 il_dealloc_bcast_stations(il);
2266 il_clear_driver_stations(il);
2267
2268 /* Unblock any waiting calls */
2269 wake_up_all(&il->wait_command_queue);
2270
2271 /* Wipe out the EXIT_PENDING status bit if we are not actually
2272 * exiting the module */
2273 if (!exit_pending)
2274 clear_bit(S_EXIT_PENDING, &il->status);
2275
2276 /* stop and reset the on-board processor */
2277 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2278
2279 /* tell the device to stop sending interrupts */
2280 spin_lock_irqsave(&il->lock, flags);
2281 il_disable_interrupts(il);
2282 spin_unlock_irqrestore(&il->lock, flags);
2283 il3945_synchronize_irq(il);
2284
2285 if (il->mac80211_registered)
2286 ieee80211_stop_queues(il->hw);
2287
2288 /* If we have not previously called il3945_init() then
2289 * clear all bits but the RF Kill bits and return */
2290 if (!il_is_init(il)) {
2291 il->status =
2292 test_bit(S_RFKILL, &il->status) << S_RFKILL |
2293 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
2294 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
2295 goto exit;
2296 }
2297
2298 /* ...otherwise clear out all the status bits but the RF Kill
2299 * bit and continue taking the NIC down. */
2300 il->status &=
2301 test_bit(S_RFKILL, &il->status) << S_RFKILL |
2302 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
2303 test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
2304 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
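	/* test_bit() returns 0 or 1, so shifting each result back to its bit
	 * position rebuilds a mask that preserves only the listed bits. */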
2305
2306 /*
 2307	 * Interrupts are disabled and synchronized, and il->mutex is held, so
 2308	 * this is the only thread that will program device registers; lockdep
 2309	 * assertions still apply, so we take reg_lock.
2310 */
2311 spin_lock_irq(&il->reg_lock);
2312 /* FIXME: il_grab_nic_access if rfkill is off ? */
2313
2314 il3945_hw_txq_ctx_stop(il);
2315 il3945_hw_rxq_stop(il);
2316 /* Power-down device's busmaster DMA clocks */
2317 _il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2318 udelay(5);
2319 /* Stop the device, and put it in low power state */
2320 _il_apm_stop(il);
2321
2322 spin_unlock_irq(&il->reg_lock);
2323
2324 il3945_hw_txq_ctx_free(il);
2325exit:
2326 memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
2327
2328 if (il->beacon_skb)
2329 dev_kfree_skb(il->beacon_skb);
2330 il->beacon_skb = NULL;
2331
2332 /* clear out any free frames */
2333 il3945_clear_free_frames(il);
2334}
2335
2336static void
2337il3945_down(struct il_priv *il)
2338{
2339 mutex_lock(&il->mutex);
2340 __il3945_down(il);
2341 mutex_unlock(&il->mutex);
2342
2343 il3945_cancel_deferred_work(il);
2344}
2345
2346#define MAX_HW_RESTARTS 5
2347
2348static int
2349il3945_alloc_bcast_station(struct il_priv *il)
2350{
2351 unsigned long flags;
2352 u8 sta_id;
2353
2354 spin_lock_irqsave(&il->sta_lock, flags);
2355 sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
2356 if (sta_id == IL_INVALID_STATION) {
2357 IL_ERR("Unable to prepare broadcast station\n");
2358 spin_unlock_irqrestore(&il->sta_lock, flags);
2359
2360 return -EINVAL;
2361 }
2362
2363 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
2364 il->stations[sta_id].used |= IL_STA_BCAST;
2365 spin_unlock_irqrestore(&il->sta_lock, flags);
2366
2367 return 0;
2368}
2369
2370static int
2371__il3945_up(struct il_priv *il)
2372{
2373 int rc, i;
2374
2375 rc = il3945_alloc_bcast_station(il);
2376 if (rc)
2377 return rc;
2378
2379 if (test_bit(S_EXIT_PENDING, &il->status)) {
2380 IL_WARN("Exit pending; will not bring the NIC up\n");
2381 return -EIO;
2382 }
2383
2384 if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
2385 IL_ERR("ucode not available for device bring up\n");
2386 return -EIO;
2387 }
2388
2389 /* If platform's RF_KILL switch is NOT set to KILL */
2390 if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2391 clear_bit(S_RFKILL, &il->status);
2392 else {
2393 set_bit(S_RFKILL, &il->status);
2394 return -ERFKILL;
2395 }
2396
2397 _il_wr(il, CSR_INT, 0xFFFFFFFF);
2398
2399 rc = il3945_hw_nic_init(il);
2400 if (rc) {
 2401		IL_ERR("Unable to init nic\n");
2402 return rc;
2403 }
2404
2405 /* make sure rfkill handshake bits are cleared */
2406 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2407 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2408
2409 /* clear (again), then enable host interrupts */
2410 _il_wr(il, CSR_INT, 0xFFFFFFFF);
2411 il_enable_interrupts(il);
2412
2413 /* really make sure rfkill handshake bits are cleared */
2414 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2415 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2416
2417 /* Copy original ucode data image from disk into backup cache.
2418 * This will be used to initialize the on-board processor's
2419 * data SRAM for a clean start when the runtime program first loads. */
2420 memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
2421 il->ucode_data.len);
2422
2423 /* We return success when we resume from suspend and rf_kill is on. */
2424 if (test_bit(S_RFKILL, &il->status))
2425 return 0;
2426
2427 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2428
2429 /* load bootstrap state machine,
2430 * load bootstrap program into processor's memory,
2431 * prepare to load the "initialize" uCode */
2432 rc = il->ops->load_ucode(il);
2433
2434 if (rc) {
2435 IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
2436 continue;
2437 }
2438
2439 /* start card; "initialize" will load runtime ucode */
2440 il3945_nic_start(il);
2441
2442 D_INFO(DRV_NAME " is coming up\n");
2443
2444 return 0;
2445 }
2446
2447 set_bit(S_EXIT_PENDING, &il->status);
2448 __il3945_down(il);
2449 clear_bit(S_EXIT_PENDING, &il->status);
2450
2451 /* tried to restart and config the device for as long as our
2452 * patience could withstand */
2453 IL_ERR("Unable to initialize device after %d attempts.\n", i);
2454 return -EIO;
2455}
2456
2457/*****************************************************************************
2458 *
2459 * Workqueue callbacks
2460 *
2461 *****************************************************************************/
2462
2463static void
2464il3945_bg_init_alive_start(struct work_struct *data)
2465{
2466 struct il_priv *il =
2467 container_of(data, struct il_priv, init_alive_start.work);
2468
2469 mutex_lock(&il->mutex);
2470 if (test_bit(S_EXIT_PENDING, &il->status))
2471 goto out;
2472
2473 il3945_init_alive_start(il);
2474out:
2475 mutex_unlock(&il->mutex);
2476}
2477
2478static void
2479il3945_bg_alive_start(struct work_struct *data)
2480{
2481 struct il_priv *il =
2482 container_of(data, struct il_priv, alive_start.work);
2483
2484 mutex_lock(&il->mutex);
2485 if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
2486 goto out;
2487
2488 il3945_alive_start(il);
2489out:
2490 mutex_unlock(&il->mutex);
2491}
2492
2493/*
2494 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2495 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2496 * *is* readable even when device has been SW_RESET into low power mode
2497 * (e.g. during RF KILL).
2498 */
2499static void
2500il3945_rfkill_poll(struct work_struct *data)
2501{
2502 struct il_priv *il =
2503 container_of(data, struct il_priv, _3945.rfkill_poll.work);
2504 bool old_rfkill = test_bit(S_RFKILL, &il->status);
2505 bool new_rfkill =
2506 !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2507
2508 if (new_rfkill != old_rfkill) {
2509 if (new_rfkill)
2510 set_bit(S_RFKILL, &il->status);
2511 else
2512 clear_bit(S_RFKILL, &il->status);
2513
2514 wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
2515
2516 D_RF_KILL("RF_KILL bit toggled to %s.\n",
2517 new_rfkill ? "disable radio" : "enable radio");
2518 }
2519
 2520	/* Keep this running, even if the radio is now enabled. This will be
 2521	 * cancelled in mac_start() if the system decides to start again */
2522 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
2523 round_jiffies_relative(2 * HZ));
2524
2525}
2526
2527int
2528il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
2529{
2530 struct il_host_cmd cmd = {
2531 .id = C_SCAN,
2532 .len = sizeof(struct il3945_scan_cmd),
2533 .flags = CMD_SIZE_HUGE,
2534 };
2535 struct il3945_scan_cmd *scan;
2536 u8 n_probes = 0;
2537 enum ieee80211_band band;
2538 bool is_active = false;
2539 int ret;
2540 u16 len;
2541
2542 lockdep_assert_held(&il->mutex);
2543
2544 if (!il->scan_cmd) {
2545 il->scan_cmd =
2546 kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
2547 GFP_KERNEL);
2548 if (!il->scan_cmd) {
 2549			D_SCAN("Failed to allocate scan memory\n");
2550 return -ENOMEM;
2551 }
2552 }
2553 scan = il->scan_cmd;
2554 memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);
2555
2556 scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
2557 scan->quiet_time = IL_ACTIVE_QUIET_TIME;
2558
2559 if (il_is_associated(il)) {
2560 u16 interval;
2561 u32 extra;
2562 u32 suspend_time = 100;
2563 u32 scan_suspend_time = 100;
2564
2565 D_INFO("Scanning while associated...\n");
2566
2567 interval = vif->bss_conf.beacon_int;
2568
2569 scan->suspend_time = 0;
2570 scan->max_out_time = cpu_to_le32(200 * 1024);
2571 if (!interval)
2572 interval = suspend_time;
2573 /*
2574 * suspend time format:
2575 * 0-19: beacon interval in usec (time before exec.)
2576 * 20-23: 0
2577 * 24-31: number of beacons (suspend between channels)
2578 */
2579
2580 extra = (suspend_time / interval) << 24;
2581 scan_suspend_time =
2582 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
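		/* e.g. suspend_time = 100 TU with a 100 TU beacon interval gives
		 * extra = 1 << 24 (one full beacon) and a zero-usec remainder,
		 * i.e. scan_suspend_time = 0x01000000. */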
2583
2584 scan->suspend_time = cpu_to_le32(scan_suspend_time);
2585 D_SCAN("suspend_time 0x%X beacon interval %d\n",
2586 scan_suspend_time, interval);
2587 }
2588
2589 if (il->scan_request->n_ssids) {
2590 int i, p = 0;
2591 D_SCAN("Kicking off active scan\n");
2592 for (i = 0; i < il->scan_request->n_ssids; i++) {
2593 /* always does wildcard anyway */
2594 if (!il->scan_request->ssids[i].ssid_len)
2595 continue;
2596 scan->direct_scan[p].id = WLAN_EID_SSID;
2597 scan->direct_scan[p].len =
2598 il->scan_request->ssids[i].ssid_len;
2599 memcpy(scan->direct_scan[p].ssid,
2600 il->scan_request->ssids[i].ssid,
2601 il->scan_request->ssids[i].ssid_len);
2602 n_probes++;
2603 p++;
2604 }
2605 is_active = true;
2606 } else
2607 D_SCAN("Kicking off passive scan.\n");
2608
2609 /* We don't build a direct scan probe request; the uCode will do
2610 * that based on the direct_mask added to each channel entry */
2611 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
2612 scan->tx_cmd.sta_id = il->hw_params.bcast_id;
2613 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2614
2615 /* flags + rate selection */
2616
2617 switch (il->scan_band) {
2618 case IEEE80211_BAND_2GHZ:
2619 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2620 scan->tx_cmd.rate = RATE_1M_PLCP;
2621 band = IEEE80211_BAND_2GHZ;
2622 break;
2623 case IEEE80211_BAND_5GHZ:
2624 scan->tx_cmd.rate = RATE_6M_PLCP;
2625 band = IEEE80211_BAND_5GHZ;
2626 break;
2627 default:
2628 IL_WARN("Invalid scan band\n");
2629 return -EIO;
2630 }
2631
2632 /*
 2633	 * If active scanning is requested but a certain channel is marked
 2634	 * passive, we can do active scanning if we detect transmissions. For
 2635	 * passive-only scanning, disable switching to active on any channel.
2636 */
2637 scan->good_CRC_th =
2638 is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
2639
2640 len =
2641 il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
2642 vif->addr, il->scan_request->ie,
2643 il->scan_request->ie_len,
2644 IL_MAX_SCAN_SIZE - sizeof(*scan));
2645 scan->tx_cmd.len = cpu_to_le16(len);
2646
2647 /* select Rx antennas */
2648 scan->flags |= il3945_get_antenna_flags(il);
2649
2650 scan->channel_count =
2651 il3945_get_channels_for_scan(il, band, is_active, n_probes,
2652 (void *)&scan->data[len], vif);
2653 if (scan->channel_count == 0) {
2654 D_SCAN("channel count %d\n", scan->channel_count);
2655 return -EIO;
2656 }
2657
2658 cmd.len +=
2659 le16_to_cpu(scan->tx_cmd.len) +
2660 scan->channel_count * sizeof(struct il3945_scan_channel);
2661 cmd.data = scan;
2662 scan->len = cpu_to_le16(cmd.len);
2663
2664 set_bit(S_SCAN_HW, &il->status);
2665 ret = il_send_cmd_sync(il, &cmd);
2666 if (ret)
2667 clear_bit(S_SCAN_HW, &il->status);
2668 return ret;
2669}
2670
2671void
2672il3945_post_scan(struct il_priv *il)
2673{
2674 /*
2675 * Since setting the RXON may have been deferred while
2676 * performing the scan, fire one off if needed
2677 */
2678 if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
2679 il3945_commit_rxon(il);
2680}
2681
2682static void
2683il3945_bg_restart(struct work_struct *data)
2684{
2685 struct il_priv *il = container_of(data, struct il_priv, restart);
2686
2687 if (test_bit(S_EXIT_PENDING, &il->status))
2688 return;
2689
2690 if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
2691 mutex_lock(&il->mutex);
2692 il->is_open = 0;
2693 mutex_unlock(&il->mutex);
2694 il3945_down(il);
2695 ieee80211_restart_hw(il->hw);
2696 } else {
2697 il3945_down(il);
2698
2699 mutex_lock(&il->mutex);
2700 if (test_bit(S_EXIT_PENDING, &il->status)) {
2701 mutex_unlock(&il->mutex);
2702 return;
2703 }
2704
2705 __il3945_up(il);
2706 mutex_unlock(&il->mutex);
2707 }
2708}
2709
2710static void
2711il3945_bg_rx_replenish(struct work_struct *data)
2712{
2713 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
2714
2715 mutex_lock(&il->mutex);
2716 if (test_bit(S_EXIT_PENDING, &il->status))
2717 goto out;
2718
2719 il3945_rx_replenish(il);
2720out:
2721 mutex_unlock(&il->mutex);
2722}
2723
2724void
2725il3945_post_associate(struct il_priv *il)
2726{
2727 int rc = 0;
2728 struct ieee80211_conf *conf = NULL;
2729
2730 if (!il->vif || !il->is_open)
2731 return;
2732
2733 D_ASSOC("Associated as %d to: %pM\n", il->vif->bss_conf.aid,
2734 il->active.bssid_addr);
2735
2736 if (test_bit(S_EXIT_PENDING, &il->status))
2737 return;
2738
2739 il_scan_cancel_timeout(il, 200);
2740
2741 conf = &il->hw->conf;
2742
2743 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2744 il3945_commit_rxon(il);
2745
2746 rc = il_send_rxon_timing(il);
2747 if (rc)
2748 IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");
2749
2750 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2751
2752 il->staging.assoc_id = cpu_to_le16(il->vif->bss_conf.aid);
2753
2754 D_ASSOC("assoc id %d beacon interval %d\n", il->vif->bss_conf.aid,
2755 il->vif->bss_conf.beacon_int);
2756
2757 if (il->vif->bss_conf.use_short_preamble)
2758 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2759 else
2760 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2761
2762 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
2763 if (il->vif->bss_conf.use_short_slot)
2764 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2765 else
2766 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2767 }
2768
2769 il3945_commit_rxon(il);
2770
2771 switch (il->vif->type) {
2772 case NL80211_IFTYPE_STATION:
2773 il3945_rate_scale_init(il->hw, IL_AP_ID);
2774 break;
2775 case NL80211_IFTYPE_ADHOC:
2776 il3945_send_beacon_cmd(il);
2777 break;
2778 default:
2779 IL_ERR("%s Should not be called in %d mode\n", __func__,
2780 il->vif->type);
2781 break;
2782 }
2783}
2784
2785/*****************************************************************************
2786 *
2787 * mac80211 entry point functions
2788 *
2789 *****************************************************************************/
2790
2791#define UCODE_READY_TIMEOUT (2 * HZ)
2792
2793static int
2794il3945_mac_start(struct ieee80211_hw *hw)
2795{
2796 struct il_priv *il = hw->priv;
2797 int ret;
2798
2799 /* we should be verifying the device is ready to be opened */
2800 mutex_lock(&il->mutex);
2801 D_MAC80211("enter\n");
2802
2803 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
2804 * ucode filename and max sizes are card-specific. */
2805
2806 if (!il->ucode_code.len) {
2807 ret = il3945_read_ucode(il);
2808 if (ret) {
2809 IL_ERR("Could not read microcode: %d\n", ret);
2810 mutex_unlock(&il->mutex);
2811 goto out_release_irq;
2812 }
2813 }
2814
2815 ret = __il3945_up(il);
2816
2817 mutex_unlock(&il->mutex);
2818
2819 if (ret)
2820 goto out_release_irq;
2821
2822 D_INFO("Start UP work.\n");
2823
2824 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
2825 * mac80211 will not be run successfully. */
2826 ret = wait_event_timeout(il->wait_command_queue,
2827 test_bit(S_READY, &il->status),
2828 UCODE_READY_TIMEOUT);
2829 if (!ret) {
2830 if (!test_bit(S_READY, &il->status)) {
2831 IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
2832 jiffies_to_msecs(UCODE_READY_TIMEOUT));
2833 ret = -ETIMEDOUT;
2834 goto out_release_irq;
2835 }
2836 }
2837
2838 /* ucode is running and will send rfkill notifications,
2839 * no need to poll the killswitch state anymore */
2840 cancel_delayed_work(&il->_3945.rfkill_poll);
2841
2842 il->is_open = 1;
2843 D_MAC80211("leave\n");
2844 return 0;
2845
2846out_release_irq:
2847 il->is_open = 0;
2848 D_MAC80211("leave - failed\n");
2849 return ret;
2850}
2851
2852static void
2853il3945_mac_stop(struct ieee80211_hw *hw)
2854{
2855 struct il_priv *il = hw->priv;
2856
2857 D_MAC80211("enter\n");
2858
2859 if (!il->is_open) {
2860 D_MAC80211("leave - skip\n");
2861 return;
2862 }
2863
2864 il->is_open = 0;
2865
2866 il3945_down(il);
2867
2868 flush_workqueue(il->workqueue);
2869
2870 /* start polling the killswitch state again */
2871 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
2872 round_jiffies_relative(2 * HZ));
2873
2874 D_MAC80211("leave\n");
2875}
2876
2877static void
2878il3945_mac_tx(struct ieee80211_hw *hw,
2879 struct ieee80211_tx_control *control,
2880 struct sk_buff *skb)
2881{
2882 struct il_priv *il = hw->priv;
2883
2884 D_MAC80211("enter\n");
2885
2886 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2887 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2888
2889 if (il3945_tx_skb(il, control->sta, skb))
2890 dev_kfree_skb_any(skb);
2891
2892 D_MAC80211("leave\n");
2893}
2894
2895void
2896il3945_config_ap(struct il_priv *il)
2897{
2898 struct ieee80211_vif *vif = il->vif;
2899 int rc = 0;
2900
2901 if (test_bit(S_EXIT_PENDING, &il->status))
2902 return;
2903
2904 /* The following should be done only at AP bring up */
2905 if (!(il_is_associated(il))) {
2906
2907 /* RXON - unassoc (to set timing command) */
2908 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2909 il3945_commit_rxon(il);
2910
2911 /* RXON Timing */
2912 rc = il_send_rxon_timing(il);
2913 if (rc)
2914 IL_WARN("C_RXON_TIMING failed - "
2915 "Attempting to continue.\n");
2916
2917 il->staging.assoc_id = 0;
2918
2919 if (vif->bss_conf.use_short_preamble)
2920 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2921 else
2922 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2923
2924 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
2925 if (vif->bss_conf.use_short_slot)
2926 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2927 else
2928 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2929 }
2930 /* restore RXON assoc */
2931 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2932 il3945_commit_rxon(il);
2933 }
2934 il3945_send_beacon_cmd(il);
2935}
2936
2937static int
2938il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2939 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2940 struct ieee80211_key_conf *key)
2941{
2942 struct il_priv *il = hw->priv;
2943 int ret = 0;
2944 u8 sta_id = IL_INVALID_STATION;
2945 u8 static_key;
2946
2947 D_MAC80211("enter\n");
2948
2949 if (il3945_mod_params.sw_crypto) {
2950 D_MAC80211("leave - hwcrypto disabled\n");
2951 return -EOPNOTSUPP;
2952 }
2953
2954 /*
 2955	 * To support IBSS RSN, don't program group keys in IBSS; the
 2956	 * hardware will then not attempt to decrypt the frames.
2957 */
2958 if (vif->type == NL80211_IFTYPE_ADHOC &&
2959 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
2960 D_MAC80211("leave - IBSS RSN\n");
2961 return -EOPNOTSUPP;
2962 }
2963
2964 static_key = !il_is_associated(il);
2965
2966 if (!static_key) {
2967 sta_id = il_sta_id_or_broadcast(il, sta);
2968 if (sta_id == IL_INVALID_STATION) {
2969 D_MAC80211("leave - station not found\n");
2970 return -EINVAL;
2971 }
2972 }
2973
2974 mutex_lock(&il->mutex);
2975 il_scan_cancel_timeout(il, 100);
2976
2977 switch (cmd) {
2978 case SET_KEY:
2979 if (static_key)
2980 ret = il3945_set_static_key(il, key);
2981 else
2982 ret = il3945_set_dynamic_key(il, key, sta_id);
2983 D_MAC80211("enable hwcrypto key\n");
2984 break;
2985 case DISABLE_KEY:
2986 if (static_key)
2987 ret = il3945_remove_static_key(il);
2988 else
2989 ret = il3945_clear_sta_key_info(il, sta_id);
2990 D_MAC80211("disable hwcrypto key\n");
2991 break;
2992 default:
2993 ret = -EINVAL;
2994 }
2995
2996 D_MAC80211("leave ret %d\n", ret);
2997 mutex_unlock(&il->mutex);
2998
2999 return ret;
3000}
3001
3002static int
3003il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3004 struct ieee80211_sta *sta)
3005{
3006 struct il_priv *il = hw->priv;
3007 struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
3008 int ret;
3009 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3010 u8 sta_id;
3011
3012 mutex_lock(&il->mutex);
3013 D_INFO("station %pM\n", sta->addr);
3014 sta_priv->common.sta_id = IL_INVALID_STATION;
3015
3016 ret = il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
3017 if (ret) {
3018 IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
3019 /* Should we return success if return code is EEXIST ? */
3020 mutex_unlock(&il->mutex);
3021 return ret;
3022 }
3023
3024 sta_priv->common.sta_id = sta_id;
3025
3026 /* Initialize rate scaling */
3027 D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
3028 il3945_rs_rate_init(il, sta, sta_id);
3029 mutex_unlock(&il->mutex);
3030
3031 return 0;
3032}
3033
3034static void
3035il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
3036 unsigned int *total_flags, u64 multicast)
3037{
3038 struct il_priv *il = hw->priv;
3039 __le32 filter_or = 0, filter_nand = 0;
3040
3041#define CHK(test, flag) do { \
3042 if (*total_flags & (test)) \
3043 filter_or |= (flag); \
3044 else \
3045 filter_nand |= (flag); \
3046 } while (0)
3047
3048 D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
3049 *total_flags);
3050
3051 CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
3052 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
3053 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3054
3055#undef CHK
3056
3057 mutex_lock(&il->mutex);
3058
3059 il->staging.filter_flags &= ~filter_nand;
3060 il->staging.filter_flags |= filter_or;
3061
3062 /*
3063 * Not committing directly because hardware can perform a scan,
3064	 * but even when the hardware is ready, committing here breaks for some reason;
3065	 * the filter flags change will be committed eventually anyway.
3066 */
3067
3068 mutex_unlock(&il->mutex);
3069
3070 /*
3071 * Receiving all multicast frames is always enabled by the
3072 * default flags setup in il_connection_init_rx_config()
3073 * since we currently do not support programming multicast
3074 * filters into the device.
3075 */
3076 *total_flags &=
3077 FIF_OTHER_BSS | FIF_ALLMULTI |
3078 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3079}
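/*
 * Editorial sketch (not driver code): the CHK macro above builds an OR mask
 * (bits to set) and a NAND mask (bits to clear) from the requested mac80211
 * FIF_* flags, then applies both to the staged RXON filter word.  The
 * stand-alone example below mirrors that technique; FILTER_PROMISC,
 * FILTER_CTL2HOST and update_filter_flags() are illustrative names, not
 * iwlegacy symbols.
 */
#include <stdint.h>
#include <stdio.h>

#define FILTER_PROMISC   (1u << 0)	/* stands in for RXON_FILTER_PROMISC_MSK */
#define FILTER_CTL2HOST  (1u << 1)	/* stands in for RXON_FILTER_CTL2HOST_MSK */

static uint32_t update_filter_flags(uint32_t staged, int want_promisc, int want_ctl)
{
	uint32_t filter_or = 0, filter_nand = 0;

	if (want_promisc)
		filter_or |= FILTER_PROMISC;
	else
		filter_nand |= FILTER_PROMISC;

	if (want_ctl)
		filter_or |= FILTER_CTL2HOST;
	else
		filter_nand |= FILTER_CTL2HOST;

	/* clear everything the caller no longer wants, then set what it does */
	staged &= ~filter_nand;
	staged |= filter_or;
	return staged;
}

int main(void)
{
	/* staged 0x3, promiscuous off, control frames on -> expect 0x2 */
	printf("0x%x\n", update_filter_flags(0x3, 0, 1));
	return 0;
}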
3080
3081/*****************************************************************************
3082 *
3083 * sysfs attributes
3084 *
3085 *****************************************************************************/
3086
3087#ifdef CONFIG_IWLEGACY_DEBUG
3088
3089/*
3090 * The following adds a new attribute to the sysfs representation
3091 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3092 * used for controlling the debug level.
3093 *
3094 * See the level definitions in iwl for details.
3095 *
3096 * The debug_level being managed using sysfs below is a per device debug
3097 * level that is used instead of the global debug level if it (the per
3098 * device debug level) is set.
3099 */
3100static ssize_t
3101il3945_show_debug_level(struct device *d, struct device_attribute *attr,
3102 char *buf)
3103{
3104 struct il_priv *il = dev_get_drvdata(d);
3105 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
3106}
3107
3108static ssize_t
3109il3945_store_debug_level(struct device *d, struct device_attribute *attr,
3110 const char *buf, size_t count)
3111{
3112 struct il_priv *il = dev_get_drvdata(d);
3113 unsigned long val;
3114 int ret;
3115
3116 ret = kstrtoul(buf, 0, &val);
3117 if (ret)
3118 IL_INFO("%s is not in hex or decimal form.\n", buf);
3119 else
3120 il->debug_level = val;
3121
3122 return strnlen(buf, count);
3123}
3124
3125static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
3126 il3945_store_debug_level);
3127
3128#endif /* CONFIG_IWLEGACY_DEBUG */
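/*
 * Editorial sketch (not driver code): the per-device debug_level written via
 * the sysfs attribute above overrides the module-wide level (il_debug_level,
 * see the "debug" module parameter at the end of this file) whenever it is
 * non-zero.  get_effective_debug_level() and the structure below are
 * illustrative assumptions; the driver's real helper is il_get_debug_level().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t il_debug_level_sketch = 0x00000008;	/* module-wide level */

struct dev_sketch {
	uint32_t debug_level;	/* per-device level, 0 means "not set" */
};

static uint32_t get_effective_debug_level(const struct dev_sketch *d)
{
	return d->debug_level ? d->debug_level : il_debug_level_sketch;
}

int main(void)
{
	struct dev_sketch a = { .debug_level = 0 };	/* falls back to global */
	struct dev_sketch b = { .debug_level = 0x40 };	/* overrides global */

	printf("0x%08X 0x%08X\n",
	       get_effective_debug_level(&a), get_effective_debug_level(&b));
	return 0;
}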
3129
3130static ssize_t
3131il3945_show_temperature(struct device *d, struct device_attribute *attr,
3132 char *buf)
3133{
3134 struct il_priv *il = dev_get_drvdata(d);
3135
3136 if (!il_is_alive(il))
3137 return -EAGAIN;
3138
3139 return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
3140}
3141
3142static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
3143
3144static ssize_t
3145il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
3146{
3147 struct il_priv *il = dev_get_drvdata(d);
3148 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
3149}
3150
3151static ssize_t
3152il3945_store_tx_power(struct device *d, struct device_attribute *attr,
3153 const char *buf, size_t count)
3154{
3155 struct il_priv *il = dev_get_drvdata(d);
3156 char *p = (char *)buf;
3157 u32 val;
3158
3159 val = simple_strtoul(p, &p, 10);
3160 if (p == buf)
3161 IL_INFO(": %s is not in decimal form.\n", buf);
3162 else
3163 il3945_hw_reg_set_txpower(il, val);
3164
3165 return count;
3166}
3167
3168static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
3169 il3945_store_tx_power);
3170
3171static ssize_t
3172il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
3173{
3174 struct il_priv *il = dev_get_drvdata(d);
3175
3176	return sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.flags));
3177}
3178
3179static ssize_t
3180il3945_store_flags(struct device *d, struct device_attribute *attr,
3181 const char *buf, size_t count)
3182{
3183 struct il_priv *il = dev_get_drvdata(d);
3184 u32 flags = simple_strtoul(buf, NULL, 0);
3185
3186 mutex_lock(&il->mutex);
3187 if (le32_to_cpu(il->staging.flags) != flags) {
3188 /* Cancel any currently running scans... */
3189 if (il_scan_cancel_timeout(il, 100))
3190 IL_WARN("Could not cancel scan.\n");
3191 else {
3192 D_INFO("Committing rxon.flags = 0x%04X\n", flags);
3193 il->staging.flags = cpu_to_le32(flags);
3194 il3945_commit_rxon(il);
3195 }
3196 }
3197 mutex_unlock(&il->mutex);
3198
3199 return count;
3200}
3201
3202static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
3203 il3945_store_flags);
3204
3205static ssize_t
3206il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
3207 char *buf)
3208{
3209 struct il_priv *il = dev_get_drvdata(d);
3210
3211 return sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags));
3212}
3213
3214static ssize_t
3215il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
3216 const char *buf, size_t count)
3217{
3218 struct il_priv *il = dev_get_drvdata(d);
3219 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3220
3221 mutex_lock(&il->mutex);
3222 if (le32_to_cpu(il->staging.filter_flags) != filter_flags) {
3223 /* Cancel any currently running scans... */
3224 if (il_scan_cancel_timeout(il, 100))
3225 IL_WARN("Could not cancel scan.\n");
3226 else {
3227 D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
3228 filter_flags);
3229 il->staging.filter_flags = cpu_to_le32(filter_flags);
3230 il3945_commit_rxon(il);
3231 }
3232 }
3233 mutex_unlock(&il->mutex);
3234
3235 return count;
3236}
3237
3238static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
3239 il3945_store_filter_flags);
3240
3241static ssize_t
3242il3945_show_measurement(struct device *d, struct device_attribute *attr,
3243 char *buf)
3244{
3245 struct il_priv *il = dev_get_drvdata(d);
3246 struct il_spectrum_notification measure_report;
3247 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3248 u8 *data = (u8 *) &measure_report;
3249 unsigned long flags;
3250
3251 spin_lock_irqsave(&il->lock, flags);
3252 if (!(il->measurement_status & MEASUREMENT_READY)) {
3253 spin_unlock_irqrestore(&il->lock, flags);
3254 return 0;
3255 }
3256 memcpy(&measure_report, &il->measure_report, size);
3257 il->measurement_status = 0;
3258 spin_unlock_irqrestore(&il->lock, flags);
3259
3260 while (size && PAGE_SIZE - len) {
3261 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3262 PAGE_SIZE - len, true);
3263 len = strlen(buf);
3264 if (PAGE_SIZE - len)
3265 buf[len++] = '\n';
3266
3267 ofs += 16;
3268 size -= min(size, 16U);
3269 }
3270
3271 return len;
3272}
3273
3274static ssize_t
3275il3945_store_measurement(struct device *d, struct device_attribute *attr,
3276 const char *buf, size_t count)
3277{
3278 struct il_priv *il = dev_get_drvdata(d);
3279 struct ieee80211_measurement_params params = {
3280 .channel = le16_to_cpu(il->active.channel),
3281 .start_time = cpu_to_le64(il->_3945.last_tsf),
3282 .duration = cpu_to_le16(1),
3283 };
3284 u8 type = IL_MEASURE_BASIC;
3285 u8 buffer[32];
3286 u8 channel;
3287
3288 if (count) {
3289 char *p = buffer;
3290 strlcpy(buffer, buf, sizeof(buffer));
3291 channel = simple_strtoul(p, NULL, 0);
3292 if (channel)
3293 params.channel = channel;
3294
3295 p = buffer;
3296 while (*p && *p != ' ')
3297 p++;
3298 if (*p)
3299 type = simple_strtoul(p + 1, NULL, 0);
3300 }
3301
3302 D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
3303 type, params.channel, buf);
3304 il3945_get_measurement(il, &params, type);
3305
3306 return count;
3307}
3308
3309static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
3310 il3945_store_measurement);
3311
3312static ssize_t
3313il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
3314 const char *buf, size_t count)
3315{
3316 struct il_priv *il = dev_get_drvdata(d);
3317
3318 il->retry_rate = simple_strtoul(buf, NULL, 0);
3319 if (il->retry_rate <= 0)
3320 il->retry_rate = 1;
3321
3322 return count;
3323}
3324
3325static ssize_t
3326il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
3327 char *buf)
3328{
3329 struct il_priv *il = dev_get_drvdata(d);
3330 return sprintf(buf, "%d", il->retry_rate);
3331}
3332
3333static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
3334 il3945_store_retry_rate);
3335
3336static ssize_t
3337il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
3338{
3339	/* all this doesn't belong in sysfs anyway */
3340 return 0;
3341}
3342
3343static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
3344
3345static ssize_t
3346il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
3347{
3348 struct il_priv *il = dev_get_drvdata(d);
3349
3350 if (!il_is_alive(il))
3351 return -EAGAIN;
3352
3353 return sprintf(buf, "%d\n", il3945_mod_params.antenna);
3354}
3355
3356static ssize_t
3357il3945_store_antenna(struct device *d, struct device_attribute *attr,
3358 const char *buf, size_t count)
3359{
3360 struct il_priv *il __maybe_unused = dev_get_drvdata(d);
3361 int ant;
3362
3363 if (count == 0)
3364 return 0;
3365
3366 if (sscanf(buf, "%1i", &ant) != 1) {
3367 D_INFO("not in hex or decimal form.\n");
3368 return count;
3369 }
3370
3371 if (ant >= 0 && ant <= 2) {
3372 D_INFO("Setting antenna select to %d.\n", ant);
3373 il3945_mod_params.antenna = (enum il3945_antenna)ant;
3374 } else
3375 D_INFO("Bad antenna select value %d.\n", ant);
3376
3377 return count;
3378}
3379
3380static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
3381 il3945_store_antenna);
3382
3383static ssize_t
3384il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
3385{
3386 struct il_priv *il = dev_get_drvdata(d);
3387 if (!il_is_alive(il))
3388 return -EAGAIN;
3389 return sprintf(buf, "0x%08x\n", (int)il->status);
3390}
3391
3392static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
3393
3394static ssize_t
3395il3945_dump_error_log(struct device *d, struct device_attribute *attr,
3396 const char *buf, size_t count)
3397{
3398 struct il_priv *il = dev_get_drvdata(d);
3399 char *p = (char *)buf;
3400
3401 if (p[0] == '1')
3402 il3945_dump_nic_error_log(il);
3403
3404 return strnlen(buf, count);
3405}
3406
3407static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
3408
3409/*****************************************************************************
3410 *
3411 * driver setup and tear down
3412 *
3413 *****************************************************************************/
3414
3415static void
3416il3945_setup_deferred_work(struct il_priv *il)
3417{
3418 il->workqueue = create_singlethread_workqueue(DRV_NAME);
3419
3420 init_waitqueue_head(&il->wait_command_queue);
3421
3422 INIT_WORK(&il->restart, il3945_bg_restart);
3423 INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
3424 INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
3425 INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
3426 INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);
3427
3428 il_setup_scan_deferred_work(il);
3429
3430 il3945_hw_setup_deferred_work(il);
3431
3432 setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
3433
3434 tasklet_init(&il->irq_tasklet,
3435 (void (*)(unsigned long))il3945_irq_tasklet,
3436 (unsigned long)il);
3437}
3438
3439static void
3440il3945_cancel_deferred_work(struct il_priv *il)
3441{
3442 il3945_hw_cancel_deferred_work(il);
3443
3444 cancel_delayed_work_sync(&il->init_alive_start);
3445 cancel_delayed_work(&il->alive_start);
3446
3447 il_cancel_scan_deferred_work(il);
3448}
3449
3450static struct attribute *il3945_sysfs_entries[] = {
3451 &dev_attr_antenna.attr,
3452 &dev_attr_channels.attr,
3453 &dev_attr_dump_errors.attr,
3454 &dev_attr_flags.attr,
3455 &dev_attr_filter_flags.attr,
3456 &dev_attr_measurement.attr,
3457 &dev_attr_retry_rate.attr,
3458 &dev_attr_status.attr,
3459 &dev_attr_temperature.attr,
3460 &dev_attr_tx_power.attr,
3461#ifdef CONFIG_IWLEGACY_DEBUG
3462 &dev_attr_debug_level.attr,
3463#endif
3464 NULL
3465};
3466
3467static struct attribute_group il3945_attribute_group = {
3468 .name = NULL, /* put in device directory */
3469 .attrs = il3945_sysfs_entries,
3470};
3471
3472static struct ieee80211_ops il3945_mac_ops __read_mostly = {
3473 .tx = il3945_mac_tx,
3474 .start = il3945_mac_start,
3475 .stop = il3945_mac_stop,
3476 .add_interface = il_mac_add_interface,
3477 .remove_interface = il_mac_remove_interface,
3478 .change_interface = il_mac_change_interface,
3479 .config = il_mac_config,
3480 .configure_filter = il3945_configure_filter,
3481 .set_key = il3945_mac_set_key,
3482 .conf_tx = il_mac_conf_tx,
3483 .reset_tsf = il_mac_reset_tsf,
3484 .bss_info_changed = il_mac_bss_info_changed,
3485 .hw_scan = il_mac_hw_scan,
3486 .sta_add = il3945_mac_sta_add,
3487 .sta_remove = il_mac_sta_remove,
3488 .tx_last_beacon = il_mac_tx_last_beacon,
3489 .flush = il_mac_flush,
3490};
3491
3492static int
3493il3945_init_drv(struct il_priv *il)
3494{
3495 int ret;
3496 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
3497
3498 il->retry_rate = 1;
3499 il->beacon_skb = NULL;
3500
3501 spin_lock_init(&il->sta_lock);
3502 spin_lock_init(&il->hcmd_lock);
3503
3504 INIT_LIST_HEAD(&il->free_frames);
3505
3506 mutex_init(&il->mutex);
3507
3508 il->ieee_channels = NULL;
3509 il->ieee_rates = NULL;
3510 il->band = IEEE80211_BAND_2GHZ;
3511
3512 il->iw_mode = NL80211_IFTYPE_STATION;
3513 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
3514
3515 /* initialize force reset */
3516 il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
3517
3518 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3519 IL_WARN("Unsupported EEPROM version: 0x%04X\n",
3520 eeprom->version);
3521 ret = -EINVAL;
3522 goto err;
3523 }
3524 ret = il_init_channel_map(il);
3525 if (ret) {
3526 IL_ERR("initializing regulatory failed: %d\n", ret);
3527 goto err;
3528 }
3529
3530 /* Set up txpower settings in driver for all channels */
3531 if (il3945_txpower_set_from_eeprom(il)) {
3532 ret = -EIO;
3533 goto err_free_channel_map;
3534 }
3535
3536 ret = il_init_geos(il);
3537 if (ret) {
3538 IL_ERR("initializing geos failed: %d\n", ret);
3539 goto err_free_channel_map;
3540 }
3541 il3945_init_hw_rates(il, il->ieee_rates);
3542
3543 return 0;
3544
3545err_free_channel_map:
3546 il_free_channel_map(il);
3547err:
3548 return ret;
3549}
3550
3551#define IL3945_MAX_PROBE_REQUEST 200
3552
3553static int
3554il3945_setup_mac(struct il_priv *il)
3555{
3556 int ret;
3557 struct ieee80211_hw *hw = il->hw;
3558
3559 hw->rate_control_algorithm = "iwl-3945-rs";
3560 hw->sta_data_size = sizeof(struct il3945_sta_priv);
3561 hw->vif_data_size = sizeof(struct il_vif_priv);
3562
3563 /* Tell mac80211 our characteristics */
3564 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
3565 ieee80211_hw_set(hw, SUPPORTS_PS);
3566 ieee80211_hw_set(hw, SIGNAL_DBM);
3567 ieee80211_hw_set(hw, SPECTRUM_MGMT);
3568
3569 hw->wiphy->interface_modes =
3570 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
3571
3572 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
3573 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
3574 REGULATORY_DISABLE_BEACON_HINTS;
3575
3576 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
3577
3578 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3579 /* we create the 802.11 header and a zero-length SSID element */
3580 hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
3581
3582 /* Default value; 4 EDCA QOS priorities */
3583 hw->queues = 4;
3584
3585 if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
3586 il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
3587 &il->bands[IEEE80211_BAND_2GHZ];
3588
3589 if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
3590 il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3591 &il->bands[IEEE80211_BAND_5GHZ];
3592
3593 il_leds_init(il);
3594
3595 ret = ieee80211_register_hw(il->hw);
3596 if (ret) {
3597 IL_ERR("Failed to register hw (error %d)\n", ret);
3598 return ret;
3599 }
3600 il->mac80211_registered = 1;
3601
3602 return 0;
3603}
3604
3605static int
3606il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3607{
3608 int err = 0;
3609 struct il_priv *il;
3610 struct ieee80211_hw *hw;
3611 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
3612 struct il3945_eeprom *eeprom;
3613 unsigned long flags;
3614
3615 /***********************
3616 * 1. Allocating HW data
3617 * ********************/
3618
3619 hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il3945_mac_ops);
3620 if (!hw) {
3621 err = -ENOMEM;
3622 goto out;
3623 }
3624 il = hw->priv;
3625 il->hw = hw;
3626 SET_IEEE80211_DEV(hw, &pdev->dev);
3627
3628 il->cmd_queue = IL39_CMD_QUEUE_NUM;
3629
3630 /*
3631 * Disabling hardware scan means that mac80211 will perform scans
3632 * "the hard way", rather than using device's scan.
3633 */
3634 if (il3945_mod_params.disable_hw_scan) {
3635 D_INFO("Disabling hw_scan\n");
3636 il3945_mac_ops.hw_scan = NULL;
3637 }
3638
3639 D_INFO("*** LOAD DRIVER ***\n");
3640 il->cfg = cfg;
3641 il->ops = &il3945_ops;
3642#ifdef CONFIG_IWLEGACY_DEBUGFS
3643 il->debugfs_ops = &il3945_debugfs_ops;
3644#endif
3645 il->pci_dev = pdev;
3646 il->inta_mask = CSR_INI_SET_MASK;
3647
3648 /***************************
3649 * 2. Initializing PCI bus
3650 * *************************/
3651 pci_disable_link_state(pdev,
3652 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3653 PCIE_LINK_STATE_CLKPM);
3654
3655 if (pci_enable_device(pdev)) {
3656 err = -ENODEV;
3657 goto out_ieee80211_free_hw;
3658 }
3659
3660 pci_set_master(pdev);
3661
3662 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3663 if (!err)
3664 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3665 if (err) {
3666 IL_WARN("No suitable DMA available.\n");
3667 goto out_pci_disable_device;
3668 }
3669
3670 pci_set_drvdata(pdev, il);
3671 err = pci_request_regions(pdev, DRV_NAME);
3672 if (err)
3673 goto out_pci_disable_device;
3674
3675 /***********************
3676 * 3. Read REV Register
3677 * ********************/
3678 il->hw_base = pci_ioremap_bar(pdev, 0);
3679 if (!il->hw_base) {
3680 err = -ENODEV;
3681 goto out_pci_release_regions;
3682 }
3683
3684 D_INFO("pci_resource_len = 0x%08llx\n",
3685 (unsigned long long)pci_resource_len(pdev, 0));
3686 D_INFO("pci_resource_base = %p\n", il->hw_base);
3687
3688 /* We disable the RETRY_TIMEOUT register (0x41) to keep
3689 * PCI Tx retries from interfering with C3 CPU state */
3690 pci_write_config_byte(pdev, 0x41, 0x00);
3691
3692 /* these spin locks will be used in apm_init and EEPROM access
3693 * we should init now
3694 */
3695 spin_lock_init(&il->reg_lock);
3696 spin_lock_init(&il->lock);
3697
3698 /*
3699 * stop and reset the on-board processor just in case it is in a
3700 * strange state ... like being left stranded by a primary kernel
3701 * and this is now the kdump kernel trying to start up
3702 */
3703 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3704
3705 /***********************
3706 * 4. Read EEPROM
3707 * ********************/
3708
3709 /* Read the EEPROM */
3710 err = il_eeprom_init(il);
3711 if (err) {
3712 IL_ERR("Unable to init EEPROM\n");
3713 goto out_iounmap;
3714 }
3715 /* MAC Address location in EEPROM same for 3945/4965 */
3716 eeprom = (struct il3945_eeprom *)il->eeprom;
3717 D_INFO("MAC address: %pM\n", eeprom->mac_address);
3718 SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);
3719
3720 /***********************
3721 * 5. Setup HW Constants
3722 * ********************/
3723 /* Device-specific setup */
3724 err = il3945_hw_set_hw_params(il);
3725 if (err) {
3726 IL_ERR("failed to set hw settings\n");
3727 goto out_eeprom_free;
3728 }
3729
3730 /***********************
3731 * 6. Setup il
3732 * ********************/
3733
3734 err = il3945_init_drv(il);
3735 if (err) {
3736 IL_ERR("initializing driver failed\n");
3737 goto out_unset_hw_params;
3738 }
3739
3740 IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);
3741
3742 /***********************
3743 * 7. Setup Services
3744 * ********************/
3745
3746 spin_lock_irqsave(&il->lock, flags);
3747 il_disable_interrupts(il);
3748 spin_unlock_irqrestore(&il->lock, flags);
3749
3750 pci_enable_msi(il->pci_dev);
3751
3752 err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
3753 if (err) {
3754 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
3755 goto out_disable_msi;
3756 }
3757
3758 err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
3759 if (err) {
3760 IL_ERR("failed to create sysfs device attributes\n");
3761 goto out_release_irq;
3762 }
3763
3764 il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]);
3765 il3945_setup_deferred_work(il);
3766 il3945_setup_handlers(il);
3767 il_power_initialize(il);
3768
3769 /*********************************
3770 * 8. Setup and Register mac80211
3771 * *******************************/
3772
3773 il_enable_interrupts(il);
3774
3775 err = il3945_setup_mac(il);
3776 if (err)
3777 goto out_remove_sysfs;
3778
3779 err = il_dbgfs_register(il, DRV_NAME);
3780 if (err)
3781 IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
3782 err);
3783
3784 /* Start monitoring the killswitch */
3785 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
3786
3787 return 0;
3788
3789out_remove_sysfs:
3790 destroy_workqueue(il->workqueue);
3791 il->workqueue = NULL;
3792 sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
3793out_release_irq:
3794 free_irq(il->pci_dev->irq, il);
3795out_disable_msi:
3796 pci_disable_msi(il->pci_dev);
3797 il_free_geos(il);
3798 il_free_channel_map(il);
3799out_unset_hw_params:
3800 il3945_unset_hw_params(il);
3801out_eeprom_free:
3802 il_eeprom_free(il);
3803out_iounmap:
3804 iounmap(il->hw_base);
3805out_pci_release_regions:
3806 pci_release_regions(pdev);
3807out_pci_disable_device:
3808 pci_disable_device(pdev);
3809out_ieee80211_free_hw:
3810 ieee80211_free_hw(il->hw);
3811out:
3812 return err;
3813}
3814
3815static void
3816il3945_pci_remove(struct pci_dev *pdev)
3817{
3818 struct il_priv *il = pci_get_drvdata(pdev);
3819 unsigned long flags;
3820
3821 if (!il)
3822 return;
3823
3824 D_INFO("*** UNLOAD DRIVER ***\n");
3825
3826 il_dbgfs_unregister(il);
3827
3828 set_bit(S_EXIT_PENDING, &il->status);
3829
3830 il_leds_exit(il);
3831
3832 if (il->mac80211_registered) {
3833 ieee80211_unregister_hw(il->hw);
3834 il->mac80211_registered = 0;
3835 } else {
3836 il3945_down(il);
3837 }
3838
3839 /*
3840 * Make sure device is reset to low power before unloading driver.
3841 * This may be redundant with il_down(), but there are paths to
3842 * run il_down() without calling apm_ops.stop(), and there are
3843 * paths to avoid running il_down() at all before leaving driver.
3844 * This (inexpensive) call *makes sure* device is reset.
3845 */
3846 il_apm_stop(il);
3847
3848 /* make sure we flush any pending irq or
3849 * tasklet for the driver
3850 */
3851 spin_lock_irqsave(&il->lock, flags);
3852 il_disable_interrupts(il);
3853 spin_unlock_irqrestore(&il->lock, flags);
3854
3855 il3945_synchronize_irq(il);
3856
3857 sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
3858
3859 cancel_delayed_work_sync(&il->_3945.rfkill_poll);
3860
3861 il3945_dealloc_ucode_pci(il);
3862
3863 if (il->rxq.bd)
3864 il3945_rx_queue_free(il, &il->rxq);
3865 il3945_hw_txq_ctx_free(il);
3866
3867 il3945_unset_hw_params(il);
3868
3869 /*netif_stop_queue(dev); */
3870 flush_workqueue(il->workqueue);
3871
3872 /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
3873 * il->workqueue... so we can't take down the workqueue
3874 * until now... */
3875 destroy_workqueue(il->workqueue);
3876 il->workqueue = NULL;
3877
3878 free_irq(pdev->irq, il);
3879 pci_disable_msi(pdev);
3880
3881 iounmap(il->hw_base);
3882 pci_release_regions(pdev);
3883 pci_disable_device(pdev);
3884
3885 il_free_channel_map(il);
3886 il_free_geos(il);
3887 kfree(il->scan_cmd);
3888 if (il->beacon_skb)
3889 dev_kfree_skb(il->beacon_skb);
3890
3891 ieee80211_free_hw(il->hw);
3892}
3893
3894/*****************************************************************************
3895 *
3896 * driver and module entry point
3897 *
3898 *****************************************************************************/
3899
3900static struct pci_driver il3945_driver = {
3901 .name = DRV_NAME,
3902 .id_table = il3945_hw_card_ids,
3903 .probe = il3945_pci_probe,
3904 .remove = il3945_pci_remove,
3905 .driver.pm = IL_LEGACY_PM_OPS,
3906};
3907
3908static int __init
3909il3945_init(void)
3910{
3911
3912 int ret;
3913 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3914 pr_info(DRV_COPYRIGHT "\n");
3915
3916 ret = il3945_rate_control_register();
3917 if (ret) {
3918 pr_err("Unable to register rate control algorithm: %d\n", ret);
3919 return ret;
3920 }
3921
3922 ret = pci_register_driver(&il3945_driver);
3923 if (ret) {
3924 pr_err("Unable to initialize PCI module\n");
3925 goto error_register;
3926 }
3927
3928 return ret;
3929
3930error_register:
3931 il3945_rate_control_unregister();
3932 return ret;
3933}
3934
3935static void __exit
3936il3945_exit(void)
3937{
3938 pci_unregister_driver(&il3945_driver);
3939 il3945_rate_control_unregister();
3940}
3941
3942MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));
3943
3944module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
3945MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
3946module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
3947MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])");
3948module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
3949 S_IRUGO);
3950MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
3951#ifdef CONFIG_IWLEGACY_DEBUG
3952module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
3953MODULE_PARM_DESC(debug, "debug output mask");
3954#endif
3955module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
3956MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
3957
3958module_exit(il3945_exit);
3959module_init(il3945_init);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
new file mode 100644
index 000000000000..76b0729ade17
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -0,0 +1,979 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37
38#include "commands.h"
39#include "3945.h"
40
41#define RS_NAME "iwl-3945-rs"
42
43static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
44 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
45};
46
47static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
48 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
49};
50
51static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
52 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
53};
54
55static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
56 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
57};
58
59struct il3945_tpt_entry {
60 s8 min_rssi;
61 u8 idx;
62};
63
64static struct il3945_tpt_entry il3945_tpt_table_a[] = {
65 {-60, RATE_54M_IDX},
66 {-64, RATE_48M_IDX},
67 {-72, RATE_36M_IDX},
68 {-80, RATE_24M_IDX},
69 {-84, RATE_18M_IDX},
70 {-85, RATE_12M_IDX},
71 {-87, RATE_9M_IDX},
72 {-89, RATE_6M_IDX}
73};
74
75static struct il3945_tpt_entry il3945_tpt_table_g[] = {
76 {-60, RATE_54M_IDX},
77 {-64, RATE_48M_IDX},
78 {-68, RATE_36M_IDX},
79 {-80, RATE_24M_IDX},
80 {-84, RATE_18M_IDX},
81 {-85, RATE_12M_IDX},
82 {-86, RATE_11M_IDX},
83 {-88, RATE_5M_IDX},
84 {-90, RATE_2M_IDX},
85 {-92, RATE_1M_IDX}
86};
87
88#define RATE_MAX_WINDOW 62
89#define RATE_FLUSH (3*HZ)
90#define RATE_WIN_FLUSH (HZ/2)
91#define IL39_RATE_HIGH_TH 11520
92#define IL_SUCCESS_UP_TH 8960
93#define IL_SUCCESS_DOWN_TH 10880
94#define RATE_MIN_FAILURE_TH 6
95#define RATE_MIN_SUCCESS_TH 8
96#define RATE_DECREASE_TH 1920
97#define RATE_RETRY_TH 15
98
99static u8
100il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
101{
102 u32 idx = 0;
103 u32 table_size = 0;
104 struct il3945_tpt_entry *tpt_table = NULL;
105
106 if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
107 rssi = IL_MIN_RSSI_VAL;
108
109 switch (band) {
110 case IEEE80211_BAND_2GHZ:
111 tpt_table = il3945_tpt_table_g;
112 table_size = ARRAY_SIZE(il3945_tpt_table_g);
113 break;
114 case IEEE80211_BAND_5GHZ:
115 tpt_table = il3945_tpt_table_a;
116 table_size = ARRAY_SIZE(il3945_tpt_table_a);
117 break;
118 default:
119 BUG();
120 break;
121 }
122
123 while (idx < table_size && rssi < tpt_table[idx].min_rssi)
124 idx++;
125
126 idx = min(idx, table_size - 1);
127
128 return tpt_table[idx].idx;
129}
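/*
 * Editorial sketch (user space, not driver code): the lookup above walks the
 * threshold table until it finds the first entry whose min_rssi the signal
 * still meets.  The indices below assume the il3945_rates[] ordering used
 * later in this series (1M..54M -> 0..11); rate_idx_for_rssi() is an
 * illustrative name.
 */
#include <stdio.h>

struct tpt_entry { int min_rssi; int idx; };

/* Mirror of il3945_tpt_table_g with numeric rate indices. */
static const struct tpt_entry table_g[] = {
	{-60, 11}, {-64, 10}, {-68, 9}, {-80, 8}, {-84, 7},
	{-85, 6}, {-86, 3}, {-88, 2}, {-90, 1}, {-92, 0},
};

static int rate_idx_for_rssi(int rssi)
{
	unsigned int i = 0, n = sizeof(table_g) / sizeof(table_g[0]);

	while (i < n && rssi < table_g[i].min_rssi)
		i++;
	if (i == n)
		i = n - 1;
	return table_g[i].idx;
}

int main(void)
{
	/* -55 dBm is strong enough for 54M (idx 11); -70 dBm lands on 24M (idx 8) */
	printf("%d %d\n", rate_idx_for_rssi(-55), rate_idx_for_rssi(-70));
	return 0;
}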
130
131static void
132il3945_clear_win(struct il3945_rate_scale_data *win)
133{
134 win->data = 0;
135 win->success_counter = 0;
136 win->success_ratio = -1;
137 win->counter = 0;
138 win->average_tpt = IL_INVALID_VALUE;
139 win->stamp = 0;
140}
141
142/**
143 * il3945_rate_scale_flush_wins - flush out the rate scale wins
144 *
145 * Returns the number of wins that have gathered data but were
146 * not flushed. If there were any that were not flushed, then
147 * reschedule the rate flushing routine.
148 */
149static int
150il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
151{
152 int unflushed = 0;
153 int i;
154 unsigned long flags;
155 struct il_priv *il __maybe_unused = rs_sta->il;
156
157 /*
158 * For each rate, if we have collected data on that rate
159 * and it has been more than RATE_WIN_FLUSH
160 * since we flushed, clear out the gathered stats
161 */
162 for (i = 0; i < RATE_COUNT_3945; i++) {
163 if (!rs_sta->win[i].counter)
164 continue;
165
166 spin_lock_irqsave(&rs_sta->lock, flags);
167 if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
168 D_RATE("flushing %d samples of rate " "idx %d\n",
169 rs_sta->win[i].counter, i);
170 il3945_clear_win(&rs_sta->win[i]);
171 } else
172 unflushed++;
173 spin_unlock_irqrestore(&rs_sta->lock, flags);
174 }
175
176 return unflushed;
177}
178
179#define RATE_FLUSH_MAX 5000 /* msec */
180#define RATE_FLUSH_MIN 50 /* msec */
181#define IL_AVERAGE_PACKETS 1500
182
183static void
184il3945_bg_rate_scale_flush(unsigned long data)
185{
186 struct il3945_rs_sta *rs_sta = (void *)data;
187 struct il_priv *il __maybe_unused = rs_sta->il;
188 int unflushed = 0;
189 unsigned long flags;
190 u32 packet_count, duration, pps;
191
192 D_RATE("enter\n");
193
194 unflushed = il3945_rate_scale_flush_wins(rs_sta);
195
196 spin_lock_irqsave(&rs_sta->lock, flags);
197
198	/* Number of packets Tx'd since last time this timer ran */
199 packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
200
201 rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
202
203 if (unflushed) {
204 duration =
205 jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
206
207 D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);
208
209 /* Determine packets per second */
210 if (duration)
211 pps = (packet_count * 1000) / duration;
212 else
213 pps = 0;
214
215 if (pps) {
216 duration = (IL_AVERAGE_PACKETS * 1000) / pps;
217 if (duration < RATE_FLUSH_MIN)
218 duration = RATE_FLUSH_MIN;
219 else if (duration > RATE_FLUSH_MAX)
220 duration = RATE_FLUSH_MAX;
221 } else
222 duration = RATE_FLUSH_MAX;
223
224 rs_sta->flush_time = msecs_to_jiffies(duration);
225
226 D_RATE("new flush period: %d msec ave %d\n", duration,
227 packet_count);
228
229 mod_timer(&rs_sta->rate_scale_flush,
230 jiffies + rs_sta->flush_time);
231
232 rs_sta->last_partial_flush = jiffies;
233 } else {
234 rs_sta->flush_time = RATE_FLUSH;
235 rs_sta->flush_pending = 0;
236 }
237 /* If there weren't any unflushed entries, we don't schedule the timer
238 * to run again */
239
240 rs_sta->last_flush = jiffies;
241
242 spin_unlock_irqrestore(&rs_sta->lock, flags);
243
244 D_RATE("leave\n");
245}
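/*
 * Editorial sketch (not driver code) of the flush-period arithmetic above:
 * the observed packet rate sizes the next timer so that roughly
 * IL_AVERAGE_PACKETS frames fit into one period, clamped to
 * [RATE_FLUSH_MIN, RATE_FLUSH_MAX] msec.  next_flush_msec() is an
 * illustrative helper name.
 */
#include <stdio.h>

#define AVERAGE_PACKETS	1500	/* IL_AVERAGE_PACKETS */
#define FLUSH_MIN_MS	50	/* RATE_FLUSH_MIN */
#define FLUSH_MAX_MS	5000	/* RATE_FLUSH_MAX */

static unsigned int next_flush_msec(unsigned int packets, unsigned int elapsed_ms)
{
	unsigned int pps, period;

	pps = elapsed_ms ? (packets * 1000) / elapsed_ms : 0;
	if (!pps)
		return FLUSH_MAX_MS;

	period = (AVERAGE_PACKETS * 1000) / pps;
	if (period < FLUSH_MIN_MS)
		period = FLUSH_MIN_MS;
	else if (period > FLUSH_MAX_MS)
		period = FLUSH_MAX_MS;
	return period;
}

int main(void)
{
	/* 300 packets in 3 s -> 100 pps -> 15000 ms raw, clamped to 5000 ms */
	printf("%u\n", next_flush_msec(300, 3000));
	return 0;
}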
246
247/**
248 * il3945_collect_tx_data - Update the success/failure sliding win
249 *
250 * We keep a sliding win of the last 64 packets transmitted
251 * at this rate. win->data contains the bitmask of successful
252 * packets.
253 */
254static void
255il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
256 struct il3945_rate_scale_data *win, int success,
257 int retries, int idx)
258{
259 unsigned long flags;
260 s32 fail_count;
261 struct il_priv *il __maybe_unused = rs_sta->il;
262
263 if (!retries) {
264 D_RATE("leave: retries == 0 -- should be at least 1\n");
265 return;
266 }
267
268 spin_lock_irqsave(&rs_sta->lock, flags);
269
270 /*
271 * Keep track of only the latest 62 tx frame attempts in this rate's
272 * history win; anything older isn't really relevant any more.
273 * If we have filled up the sliding win, drop the oldest attempt;
274 * if the oldest attempt (highest bit in bitmap) shows "success",
275 * subtract "1" from the success counter (this is the main reason
276 * we keep these bitmaps!).
277 * */
278 while (retries > 0) {
279 if (win->counter >= RATE_MAX_WINDOW) {
280
281 /* remove earliest */
282 win->counter = RATE_MAX_WINDOW - 1;
283
284 if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
285 win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
286 win->success_counter--;
287 }
288 }
289
290 /* Increment frames-attempted counter */
291 win->counter++;
292
293 /* Shift bitmap by one frame (throw away oldest history),
294 * OR in "1", and increment "success" if this
295 * frame was successful. */
296 win->data <<= 1;
297 if (success > 0) {
298 win->success_counter++;
299 win->data |= 0x1;
300 success--;
301 }
302
303 retries--;
304 }
305
306 /* Calculate current success ratio, avoid divide-by-0! */
307 if (win->counter > 0)
308 win->success_ratio =
309 128 * (100 * win->success_counter) / win->counter;
310 else
311 win->success_ratio = IL_INVALID_VALUE;
312
313 fail_count = win->counter - win->success_counter;
314
315 /* Calculate average throughput, if we have enough history. */
316 if (fail_count >= RATE_MIN_FAILURE_TH ||
317 win->success_counter >= RATE_MIN_SUCCESS_TH)
318 win->average_tpt =
319 ((win->success_ratio * rs_sta->expected_tpt[idx] +
320 64) / 128);
321 else
322 win->average_tpt = IL_INVALID_VALUE;
323
324 /* Tag this win as having been updated */
325 win->stamp = jiffies;
326
327 spin_unlock_irqrestore(&rs_sta->lock, flags);
328}
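/*
 * Editorial sketch (not driver code) of the sliding window above: a 62-entry
 * bit history where bit 0 is the newest attempt, and a success ratio scaled
 * by 128 (so 12800 means 100 %).  Structure and function names below are
 * illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define WINDOW 62	/* RATE_MAX_WINDOW */

struct win_sketch {
	uint64_t data;		/* bit i set => attempt i frames ago succeeded */
	int counter;		/* attempts recorded, capped at WINDOW */
	int success_counter;	/* number of set bits in data */
};

static void record_attempt(struct win_sketch *w, int success)
{
	/* window full: drop the oldest attempt before shifting in the new one */
	if (w->counter >= WINDOW) {
		w->counter = WINDOW - 1;
		if (w->data & (1ULL << (WINDOW - 1))) {
			w->data &= ~(1ULL << (WINDOW - 1));
			w->success_counter--;
		}
	}
	w->counter++;
	w->data <<= 1;
	if (success) {
		w->data |= 1;
		w->success_counter++;
	}
}

static int success_ratio(const struct win_sketch *w)
{
	return w->counter ? 128 * (100 * w->success_counter) / w->counter : -1;
}

int main(void)
{
	struct win_sketch w = { 0 };
	int i;

	for (i = 0; i < 62; i++)
		record_attempt(&w, i % 2);	/* 31 of 62 attempts succeed */

	printf("%d\n", success_ratio(&w));	/* 128 * 50 = 6400 */
	return 0;
}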
329
330/*
331 * Called after adding a new station to initialize rate scaling
332 */
333void
334il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
335{
336 struct ieee80211_hw *hw = il->hw;
337 struct ieee80211_conf *conf = &il->hw->conf;
338 struct il3945_sta_priv *psta;
339 struct il3945_rs_sta *rs_sta;
340 struct ieee80211_supported_band *sband;
341 int i;
342
343 D_INFO("enter\n");
344 if (sta_id == il->hw_params.bcast_id)
345 goto out;
346
347 psta = (struct il3945_sta_priv *)sta->drv_priv;
348 rs_sta = &psta->rs_sta;
349 sband = hw->wiphy->bands[conf->chandef.chan->band];
350
351 rs_sta->il = il;
352
353 rs_sta->start_rate = RATE_INVALID;
354
355 /* default to just 802.11b */
356 rs_sta->expected_tpt = il3945_expected_tpt_b;
357
358 rs_sta->last_partial_flush = jiffies;
359 rs_sta->last_flush = jiffies;
360 rs_sta->flush_time = RATE_FLUSH;
361 rs_sta->last_tx_packets = 0;
362
363 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
364 rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
365
366 for (i = 0; i < RATE_COUNT_3945; i++)
367 il3945_clear_win(&rs_sta->win[i]);
368
369 /* TODO: what is a good starting rate for STA? About middle? Maybe not
370 * the lowest or the highest rate.. Could consider using RSSI from
371 * previous packets? Need to have IEEE 802.1X auth succeed immediately
372 * after assoc.. */
373
374 for (i = sband->n_bitrates - 1; i >= 0; i--) {
375 if (sta->supp_rates[sband->band] & (1 << i)) {
376 rs_sta->last_txrate_idx = i;
377 break;
378 }
379 }
380
381 il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
382	/* For the 5 GHz band, rates start at IL_FIRST_OFDM_RATE */
383 if (sband->band == IEEE80211_BAND_5GHZ) {
384 rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
385 il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
386 }
387
388out:
389 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
390
391 D_INFO("leave\n");
392}
393
394static void *
395il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
396{
397 return hw->priv;
398}
399
400/* rate scale requires free function to be implemented */
401static void
402il3945_rs_free(void *il)
403{
404}
405
406static void *
407il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
408{
409 struct il3945_rs_sta *rs_sta;
410 struct il3945_sta_priv *psta = (void *)sta->drv_priv;
411 struct il_priv *il __maybe_unused = il_priv;
412
413 D_RATE("enter\n");
414
415 rs_sta = &psta->rs_sta;
416
417 spin_lock_init(&rs_sta->lock);
418 init_timer(&rs_sta->rate_scale_flush);
419
420 D_RATE("leave\n");
421
422 return rs_sta;
423}
424
425static void
426il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
427{
428 struct il3945_rs_sta *rs_sta = il_sta;
429
430 /*
431 * Be careful not to use any members of il3945_rs_sta (like trying
432 * to use il_priv to print out debugging) since it may not be fully
433 * initialized at this point.
434 */
435 del_timer_sync(&rs_sta->rate_scale_flush);
436}
437
438/**
439 * il3945_rs_tx_status - Update rate control values based on Tx results
440 *
441 * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
442 * the hardware for each rate.
443 */
444static void
445il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
446 struct ieee80211_sta *sta, void *il_sta,
447 struct sk_buff *skb)
448{
449 s8 retries = 0, current_count;
450 int scale_rate_idx, first_idx, last_idx;
451 unsigned long flags;
452 struct il_priv *il = (struct il_priv *)il_rate;
453 struct il3945_rs_sta *rs_sta = il_sta;
454 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
455
456 D_RATE("enter\n");
457
458 retries = info->status.rates[0].count;
459 /* Sanity Check for retries */
460 if (retries > RATE_RETRY_TH)
461 retries = RATE_RETRY_TH;
462
463 first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
464 if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
465 D_RATE("leave: Rate out of bounds: %d\n", first_idx);
466 return;
467 }
468
469 if (!il_sta) {
470 D_RATE("leave: No STA il data to update!\n");
471 return;
472 }
473
474 /* Treat uninitialized rate scaling data same as non-existing. */
475 if (!rs_sta->il) {
476 D_RATE("leave: STA il data uninitialized!\n");
477 return;
478 }
479
480 rs_sta->tx_packets++;
481
482 scale_rate_idx = first_idx;
483 last_idx = first_idx;
484
485 /*
486 * Update the win for each rate. We determine which rates
487 * were Tx'd based on the total number of retries vs. the number
488 * of retries configured for each rate -- currently set to the
489	 * il value 'retry_rate' rather than a rate-specific count
490 *
491 * On exit from this while loop last_idx indicates the rate
492 * at which the frame was finally transmitted (or failed if no
493 * ACK)
494 */
495 while (retries > 1) {
496 if ((retries - 1) < il->retry_rate) {
497 current_count = (retries - 1);
498 last_idx = scale_rate_idx;
499 } else {
500 current_count = il->retry_rate;
501 last_idx = il3945_rs_next_rate(il, scale_rate_idx);
502 }
503
504 /* Update this rate accounting for as many retries
505 * as was used for it (per current_count) */
506 il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
507 current_count, scale_rate_idx);
508 D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
509 current_count);
510
511 retries -= current_count;
512
513 scale_rate_idx = last_idx;
514 }
515
516 /* Update the last idx win with success/failure based on ACK */
517 D_RATE("Update rate %d with %s.\n", last_idx,
518 (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
519 il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
520 info->flags & IEEE80211_TX_STAT_ACK, 1,
521 last_idx);
522
523 /* We updated the rate scale win -- if its been more than
524 * flush_time since the last run, schedule the flush
525 * again */
526 spin_lock_irqsave(&rs_sta->lock, flags);
527
528 if (!rs_sta->flush_pending &&
529 time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
530
531 rs_sta->last_partial_flush = jiffies;
532 rs_sta->flush_pending = 1;
533 mod_timer(&rs_sta->rate_scale_flush,
534 jiffies + rs_sta->flush_time);
535 }
536
537 spin_unlock_irqrestore(&rs_sta->lock, flags);
538
539 D_RATE("leave\n");
540}
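/*
 * Editorial sketch (not driver code) of the retry accounting above: of the N
 * transmit attempts reported by mac80211, each rate in the fallback chain is
 * charged at most retry_rate attempts as failures, and the final attempt is
 * credited to the rate the frame actually went out on (per the ACK flag).
 * charge_retries() and next_lower_rate() stand in for the loop above and for
 * il3945_rs_next_rate().
 */
#include <stdio.h>

static int next_lower_rate(int idx)
{
	return idx > 0 ? idx - 1 : 0;	/* pretend fallback chain */
}

static int charge_retries(int first_idx, int retries, int retry_rate)
{
	int idx = first_idx, last_idx = first_idx, count;

	while (retries > 1) {
		if (retries - 1 < retry_rate) {
			count = retries - 1;
			last_idx = idx;
		} else {
			count = retry_rate;
			last_idx = next_lower_rate(idx);
		}
		printf("rate %d charged %d failed attempt(s)\n", idx, count);
		retries -= count;
		idx = last_idx;
	}
	/* the remaining attempt is the one whose ACK status gets recorded */
	printf("final attempt at rate %d\n", last_idx);
	return last_idx;
}

int main(void)
{
	charge_retries(8, 4, 1);	/* 4 attempts, retry_rate == 1 */
	return 0;
}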
541
542static u16
543il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
544 enum ieee80211_band band)
545{
546 u8 high = RATE_INVALID;
547 u8 low = RATE_INVALID;
548 struct il_priv *il __maybe_unused = rs_sta->il;
549
550 /* 802.11A walks to the next literal adjacent rate in
551 * the rate table */
552 if (unlikely(band == IEEE80211_BAND_5GHZ)) {
553 int i;
554 u32 mask;
555
556 /* Find the previous rate that is in the rate mask */
557 i = idx - 1;
558 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
559 if (rate_mask & mask) {
560 low = i;
561 break;
562 }
563 }
564
565 /* Find the next rate that is in the rate mask */
566 i = idx + 1;
567 for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
568 if (rate_mask & mask) {
569 high = i;
570 break;
571 }
572 }
573
574 return (high << 8) | low;
575 }
576
577 low = idx;
578 while (low != RATE_INVALID) {
579 if (rs_sta->tgg)
580 low = il3945_rates[low].prev_rs_tgg;
581 else
582 low = il3945_rates[low].prev_rs;
583 if (low == RATE_INVALID)
584 break;
585 if (rate_mask & (1 << low))
586 break;
587 D_RATE("Skipping masked lower rate: %d\n", low);
588 }
589
590 high = idx;
591 while (high != RATE_INVALID) {
592 if (rs_sta->tgg)
593 high = il3945_rates[high].next_rs_tgg;
594 else
595 high = il3945_rates[high].next_rs;
596 if (high == RATE_INVALID)
597 break;
598 if (rate_mask & (1 << high))
599 break;
600 D_RATE("Skipping masked higher rate: %d\n", high);
601 }
602
603 return (high << 8) | low;
604}
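/*
 * Editorial sketch (not driver code) of the return-value packing above: the
 * higher adjacent rate index rides in bits 15..8 and the lower one in bits
 * 7..0, with a sentinel (RATE_INVALID, assumed 0xff for this sketch) meaning
 * "no such neighbour".  The caller unpacks it just as il3945_rs_get_rate()
 * does with "& 0xff" and ">> 8".
 */
#include <stdint.h>
#include <stdio.h>

#define INVALID_IDX 0xff	/* stands in for RATE_INVALID */

static uint16_t pack_adjacent(uint8_t high, uint8_t low)
{
	return (uint16_t)((high << 8) | low);
}

int main(void)
{
	uint16_t high_low = pack_adjacent(9, 7);	/* neighbours idx 9 and 7 */
	uint8_t low = high_low & 0xff;
	uint8_t high = (high_low >> 8) & 0xff;

	printf("low=%u high=%u\n", low, high);

	/* a missing neighbour comes back as the sentinel */
	high_low = pack_adjacent(INVALID_IDX, 7);
	printf("high missing: %d\n", ((high_low >> 8) & 0xff) == INVALID_IDX);
	return 0;
}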
605
606/**
607 * il3945_rs_get_rate - find the rate for the requested packet
608 *
609 * Returns the ieee80211_rate structure allocated by the driver.
610 *
611 * The rate control algorithm has no internal mapping between hw_mode's
612 * rate ordering and the rate ordering used by the rate control algorithm.
613 *
614 * The rate control algorithm uses a single table of rates that goes across
615 * the entire A/B/G spectrum vs. being limited to just one particular
616 * hw_mode.
617 *
618 * As such, we can't convert the idx obtained below into the hw_mode's
619 * rate table and must reference the driver allocated rate table
620 *
621 */
622static void
623il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
624 struct ieee80211_tx_rate_control *txrc)
625{
626 struct ieee80211_supported_band *sband = txrc->sband;
627 struct sk_buff *skb = txrc->skb;
628 u8 low = RATE_INVALID;
629 u8 high = RATE_INVALID;
630 u16 high_low;
631 int idx;
632 struct il3945_rs_sta *rs_sta = il_sta;
633 struct il3945_rate_scale_data *win = NULL;
634 int current_tpt = IL_INVALID_VALUE;
635 int low_tpt = IL_INVALID_VALUE;
636 int high_tpt = IL_INVALID_VALUE;
637 u32 fail_count;
638 s8 scale_action = 0;
639 unsigned long flags;
640 u16 rate_mask;
641 s8 max_rate_idx = -1;
642 struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
643 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
644
645 D_RATE("enter\n");
646
647 /* Treat uninitialized rate scaling data same as non-existing. */
648 if (rs_sta && !rs_sta->il) {
649 D_RATE("Rate scaling information not initialized yet.\n");
650 il_sta = NULL;
651 }
652
653 if (rate_control_send_low(sta, il_sta, txrc))
654 return;
655
656 rate_mask = sta->supp_rates[sband->band];
657
658 /* get user max rate if set */
659 max_rate_idx = txrc->max_rate_idx;
660 if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
661 max_rate_idx += IL_FIRST_OFDM_RATE;
662 if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
663 max_rate_idx = -1;
664
665 idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
666
667 if (sband->band == IEEE80211_BAND_5GHZ)
668 rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
669
670 spin_lock_irqsave(&rs_sta->lock, flags);
671
672 /* for recent assoc, choose best rate regarding
673 * to rssi value
674 */
675 if (rs_sta->start_rate != RATE_INVALID) {
676 if (rs_sta->start_rate < idx &&
677 (rate_mask & (1 << rs_sta->start_rate)))
678 idx = rs_sta->start_rate;
679 rs_sta->start_rate = RATE_INVALID;
680 }
681
682 /* force user max rate if set by user */
683 if (max_rate_idx != -1 && max_rate_idx < idx) {
684 if (rate_mask & (1 << max_rate_idx))
685 idx = max_rate_idx;
686 }
687
688 win = &(rs_sta->win[idx]);
689
690 fail_count = win->counter - win->success_counter;
691
692 if (fail_count < RATE_MIN_FAILURE_TH &&
693 win->success_counter < RATE_MIN_SUCCESS_TH) {
694 spin_unlock_irqrestore(&rs_sta->lock, flags);
695
696 D_RATE("Invalid average_tpt on rate %d: "
697 "counter: %d, success_counter: %d, "
698 "expected_tpt is %sNULL\n", idx, win->counter,
699 win->success_counter,
700 rs_sta->expected_tpt ? "not " : "");
701
702 /* Can't calculate this yet; not enough history */
703 win->average_tpt = IL_INVALID_VALUE;
704 goto out;
705
706 }
707
708 current_tpt = win->average_tpt;
709
710 high_low =
711 il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
712 low = high_low & 0xff;
713 high = (high_low >> 8) & 0xff;
714
715	/* If the user set a max rate, don't allow rates higher than that constraint */
716 if (max_rate_idx != -1 && max_rate_idx < high)
717 high = RATE_INVALID;
718
719 /* Collect Measured throughputs of adjacent rates */
720 if (low != RATE_INVALID)
721 low_tpt = rs_sta->win[low].average_tpt;
722
723 if (high != RATE_INVALID)
724 high_tpt = rs_sta->win[high].average_tpt;
725
726 spin_unlock_irqrestore(&rs_sta->lock, flags);
727
728 scale_action = 0;
729
730	/* Low success ratio, need to drop the rate */
731 if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
732 D_RATE("decrease rate because of low success_ratio\n");
733 scale_action = -1;
734 /* No throughput measured yet for adjacent rates,
735	 * try increasing */
736 } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
737
738 if (high != RATE_INVALID &&
739 win->success_ratio >= RATE_INCREASE_TH)
740 scale_action = 1;
741 else if (low != RATE_INVALID)
742 scale_action = 0;
743
744 /* Both adjacent throughputs are measured, but neither one has
745 * better throughput; we're using the best rate, don't change
746 * it! */
747 } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
748 && low_tpt < current_tpt && high_tpt < current_tpt) {
749
750 D_RATE("No action -- low [%d] & high [%d] < "
751 "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
752 scale_action = 0;
753
754 /* At least one of the rates has better throughput */
755 } else {
756 if (high_tpt != IL_INVALID_VALUE) {
757
758 /* High rate has better throughput, Increase
759 * rate */
760 if (high_tpt > current_tpt &&
761 win->success_ratio >= RATE_INCREASE_TH)
762 scale_action = 1;
763 else {
764				D_RATE("no rate change: higher rate not better\n");
765 scale_action = 0;
766 }
767 } else if (low_tpt != IL_INVALID_VALUE) {
768 if (low_tpt > current_tpt) {
769 D_RATE("decrease rate because of low tpt\n");
770 scale_action = -1;
771 } else if (win->success_ratio >= RATE_INCREASE_TH) {
772				/* Success ratio is good enough,
773				 * try a higher rate */
774 scale_action = 1;
775 }
776 }
777 }
778
779 /* Sanity check; asked for decrease, but success rate or throughput
780 * has been good at old rate. Don't change it. */
781 if (scale_action == -1 && low != RATE_INVALID &&
782 (win->success_ratio > RATE_HIGH_TH ||
783 current_tpt > 100 * rs_sta->expected_tpt[low]))
784 scale_action = 0;
785
786 switch (scale_action) {
787 case -1:
788		/* Decrease rate */
789 if (low != RATE_INVALID)
790 idx = low;
791 break;
792 case 1:
793 /* Increase rate */
794 if (high != RATE_INVALID)
795 idx = high;
796
797 break;
798 case 0:
799 default:
800 /* No change */
801 break;
802 }
803
804 D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
805 low, high);
806
807out:
808
809 if (sband->band == IEEE80211_BAND_5GHZ) {
810 if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
811 idx = IL_FIRST_OFDM_RATE;
812 rs_sta->last_txrate_idx = idx;
813 info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
814 } else {
815 rs_sta->last_txrate_idx = idx;
816 info->control.rates[0].idx = rs_sta->last_txrate_idx;
817 }
818 info->control.rates[0].count = 1;
819
820 D_RATE("leave: %d\n", idx);
821}
822
823#ifdef CONFIG_MAC80211_DEBUGFS
824
825static ssize_t
826il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
827 size_t count, loff_t *ppos)
828{
829 char *buff;
830 int desc = 0;
831 int j;
832 ssize_t ret;
833 struct il3945_rs_sta *lq_sta = file->private_data;
834
835 buff = kmalloc(1024, GFP_KERNEL);
836 if (!buff)
837 return -ENOMEM;
838
839 desc +=
840 sprintf(buff + desc,
841 "tx packets=%d last rate idx=%d\n"
842 "rate=0x%X flush time %d\n", lq_sta->tx_packets,
843 lq_sta->last_txrate_idx, lq_sta->start_rate,
844 jiffies_to_msecs(lq_sta->flush_time));
845 for (j = 0; j < RATE_COUNT_3945; j++) {
846 desc +=
847 sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
848 lq_sta->win[j].counter,
849 lq_sta->win[j].success_counter,
850 lq_sta->win[j].success_ratio);
851 }
852 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
853 kfree(buff);
854 return ret;
855}
856
857static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
858 .read = il3945_sta_dbgfs_stats_table_read,
859 .open = simple_open,
860 .llseek = default_llseek,
861};
862
863static void
864il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
865{
866 struct il3945_rs_sta *lq_sta = il_sta;
867
868 lq_sta->rs_sta_dbgfs_stats_table_file =
869 debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
870 &rs_sta_dbgfs_stats_table_ops);
871
872}
873
874static void
875il3945_remove_debugfs(void *il, void *il_sta)
876{
877 struct il3945_rs_sta *lq_sta = il_sta;
878 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
879}
880#endif
881
882/*
883 * Initialization of rate scaling information is done by driver after
884 * the station is added. Since mac80211 calls this function before a
885 * station is added we ignore it.
886 */
887static void
888il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
889 struct cfg80211_chan_def *chandef,
890 struct ieee80211_sta *sta, void *il_sta)
891{
892}
893
894static const struct rate_control_ops rs_ops = {
895 .name = RS_NAME,
896 .tx_status = il3945_rs_tx_status,
897 .get_rate = il3945_rs_get_rate,
898 .rate_init = il3945_rs_rate_init_stub,
899 .alloc = il3945_rs_alloc,
900 .free = il3945_rs_free,
901 .alloc_sta = il3945_rs_alloc_sta,
902 .free_sta = il3945_rs_free_sta,
903#ifdef CONFIG_MAC80211_DEBUGFS
904 .add_sta_debugfs = il3945_add_debugfs,
905 .remove_sta_debugfs = il3945_remove_debugfs,
906#endif
907
908};
909
910void
911il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
912{
913 struct il_priv *il = hw->priv;
914 s32 rssi = 0;
915 unsigned long flags;
916 struct il3945_rs_sta *rs_sta;
917 struct ieee80211_sta *sta;
918 struct il3945_sta_priv *psta;
919
920 D_RATE("enter\n");
921
922 rcu_read_lock();
923
924 sta = ieee80211_find_sta(il->vif, il->stations[sta_id].sta.sta.addr);
925 if (!sta) {
926 D_RATE("Unable to find station to initialize rate scaling.\n");
927 rcu_read_unlock();
928 return;
929 }
930
931 psta = (void *)sta->drv_priv;
932 rs_sta = &psta->rs_sta;
933
934 spin_lock_irqsave(&rs_sta->lock, flags);
935
936 rs_sta->tgg = 0;
937 switch (il->band) {
938 case IEEE80211_BAND_2GHZ:
939 /* TODO: this always does G, not a regression */
940 if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) {
941 rs_sta->tgg = 1;
942 rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
943 } else
944 rs_sta->expected_tpt = il3945_expected_tpt_g;
945 break;
946 case IEEE80211_BAND_5GHZ:
947 rs_sta->expected_tpt = il3945_expected_tpt_a;
948 break;
949 default:
950 BUG();
951 break;
952 }
953
954 spin_unlock_irqrestore(&rs_sta->lock, flags);
955
956 rssi = il->_3945.last_rx_rssi;
957 if (rssi == 0)
958 rssi = IL_MIN_RSSI_VAL;
959
960 D_RATE("Network RSSI: %d\n", rssi);
961
962 rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);
963
964 D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
965 rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
966 rcu_read_unlock();
967}
968
969int
970il3945_rate_control_register(void)
971{
972 return ieee80211_rate_control_register(&rs_ops);
973}
974
975void
976il3945_rate_control_unregister(void)
977{
978 ieee80211_rate_control_unregister(&rs_ops);
979}
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
new file mode 100644
index 000000000000..93bdf684babe
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -0,0 +1,2741 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/slab.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/firmware.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39#include <net/mac80211.h>
40
41#include "common.h"
42#include "3945.h"
43
44/* Send led command */
45static int
46il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
47{
48 struct il_host_cmd cmd = {
49 .id = C_LEDS,
50 .len = sizeof(struct il_led_cmd),
51 .data = led_cmd,
52 .flags = CMD_ASYNC,
53 .callback = NULL,
54 };
55
56 return il_send_cmd(il, &cmd);
57}
58
59#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
60 [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
61 RATE_##r##M_IEEE, \
62 RATE_##ip##M_IDX, \
63 RATE_##in##M_IDX, \
64 RATE_##rp##M_IDX, \
65 RATE_##rn##M_IDX, \
66 RATE_##pp##M_IDX, \
67 RATE_##np##M_IDX, \
68 RATE_##r##M_IDX_TBL, \
69 RATE_##ip##M_IDX_TBL }
70
71/*
72 * Parameter order:
 73 * rate, prev ieee rate, next ieee rate, prev rate-scale rate, next rate-scale rate, prev tgg rate, next tgg rate
74 *
75 * If there isn't a valid next or previous rate then INV is used which
76 * maps to RATE_INVALID
77 *
78 */
79const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
80 IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
81 IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
 82	IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /* 5.5mbps */
83 IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
84 IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
85 IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
86 IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
87 IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
88 IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
89 IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
90 IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
91 IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV), /* 54mbps */
92};
93
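/*
 * Editorial illustration (not part of the driver): applying the macro
 * definition above, the 11 Mbps entry IL_DECLARE_RATE_INFO(11, 9, 12, 5,
 * 12, 5, 18) expands to roughly
 *
 *	[RATE_11M_IDX] = { RATE_11M_PLCP, RATE_11M_IEEE,
 *			   RATE_9M_IDX, RATE_12M_IDX,	(prev/next ieee)
 *			   RATE_5M_IDX, RATE_12M_IDX,	(prev/next rate-scale)
 *			   RATE_5M_IDX, RATE_18M_IDX,	(prev/next tgg)
 *			   RATE_11M_IDX_TBL, RATE_9M_IDX_TBL },
 *
 * so the rate-scaling code can walk the fallback chain in three different
 * orders, depending on whether plain ieee ordering, the rate-scale order,
 * or the TGG-protected order applies.
 */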
94static inline u8
95il3945_get_prev_ieee_rate(u8 rate_idx)
96{
97 u8 rate = il3945_rates[rate_idx].prev_ieee;
98
99 if (rate == RATE_INVALID)
100 rate = rate_idx;
101 return rate;
102}
103
104/* 1 = enable the il3945_disable_events() function */
105#define IL_EVT_DISABLE (0)
106#define IL_EVT_DISABLE_SIZE (1532/32)
107
108/**
109 * il3945_disable_events - Disable selected events in uCode event log
110 *
111 * Disable an event by writing "1"s into "disable"
112 * bitmap in SRAM. Bit position corresponds to Event # (id/type).
113 * Default values of 0 enable uCode events to be logged.
114 * Use only for special debugging. This function is just a placeholder as-is,
115 * you'll need to provide the special bits! ...
116 * ... and set IL_EVT_DISABLE to 1. */
117void
118il3945_disable_events(struct il_priv *il)
119{
120 int i;
121 u32 base; /* SRAM address of event log header */
122 u32 disable_ptr; /* SRAM address of event-disable bitmap array */
123 u32 array_size; /* # of u32 entries in array */
124 static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
125 0x00000000, /* 31 - 0 Event id numbers */
126 0x00000000, /* 63 - 32 */
127 0x00000000, /* 95 - 64 */
128 0x00000000, /* 127 - 96 */
129 0x00000000, /* 159 - 128 */
130 0x00000000, /* 191 - 160 */
131 0x00000000, /* 223 - 192 */
132 0x00000000, /* 255 - 224 */
133 0x00000000, /* 287 - 256 */
134 0x00000000, /* 319 - 288 */
135 0x00000000, /* 351 - 320 */
136 0x00000000, /* 383 - 352 */
137 0x00000000, /* 415 - 384 */
138 0x00000000, /* 447 - 416 */
139 0x00000000, /* 479 - 448 */
140 0x00000000, /* 511 - 480 */
141 0x00000000, /* 543 - 512 */
142 0x00000000, /* 575 - 544 */
143 0x00000000, /* 607 - 576 */
144 0x00000000, /* 639 - 608 */
145 0x00000000, /* 671 - 640 */
146 0x00000000, /* 703 - 672 */
147 0x00000000, /* 735 - 704 */
148 0x00000000, /* 767 - 736 */
149 0x00000000, /* 799 - 768 */
150 0x00000000, /* 831 - 800 */
151 0x00000000, /* 863 - 832 */
152 0x00000000, /* 895 - 864 */
153 0x00000000, /* 927 - 896 */
154 0x00000000, /* 959 - 928 */
155 0x00000000, /* 991 - 960 */
156 0x00000000, /* 1023 - 992 */
157 0x00000000, /* 1055 - 1024 */
158 0x00000000, /* 1087 - 1056 */
159 0x00000000, /* 1119 - 1088 */
160 0x00000000, /* 1151 - 1120 */
161 0x00000000, /* 1183 - 1152 */
162 0x00000000, /* 1215 - 1184 */
163 0x00000000, /* 1247 - 1216 */
164 0x00000000, /* 1279 - 1248 */
165 0x00000000, /* 1311 - 1280 */
166 0x00000000, /* 1343 - 1312 */
167 0x00000000, /* 1375 - 1344 */
168 0x00000000, /* 1407 - 1376 */
169 0x00000000, /* 1439 - 1408 */
170 0x00000000, /* 1471 - 1440 */
171 0x00000000, /* 1503 - 1472 */
172 };
173
174 base = le32_to_cpu(il->card_alive.log_event_table_ptr);
175 if (!il3945_hw_valid_rtc_data_addr(base)) {
176 IL_ERR("Invalid event log pointer 0x%08X\n", base);
177 return;
178 }
179
180 disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
181 array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));
182
183 if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
184 D_INFO("Disabling selected uCode log events at 0x%x\n",
185 disable_ptr);
186 for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
187 il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
188 evt_disable[i]);
189
190 } else {
191 D_INFO("Selected uCode log events may be disabled\n");
192 D_INFO(" by writing \"1\"s into disable bitmap\n");
193 D_INFO(" in SRAM at 0x%x, size %d u32s\n", disable_ptr,
194 array_size);
195 }
196
197}
198
199static int
200il3945_hwrate_to_plcp_idx(u8 plcp)
201{
202 int idx;
203
204 for (idx = 0; idx < RATE_COUNT_3945; idx++)
205 if (il3945_rates[idx].plcp == plcp)
206 return idx;
207 return -1;
208}
209
210#ifdef CONFIG_IWLEGACY_DEBUG
211#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
212
213static const char *
214il3945_get_tx_fail_reason(u32 status)
215{
216 switch (status & TX_STATUS_MSK) {
217 case TX_3945_STATUS_SUCCESS:
218 return "SUCCESS";
219 TX_STATUS_ENTRY(SHORT_LIMIT);
220 TX_STATUS_ENTRY(LONG_LIMIT);
221 TX_STATUS_ENTRY(FIFO_UNDERRUN);
222 TX_STATUS_ENTRY(MGMNT_ABORT);
223 TX_STATUS_ENTRY(NEXT_FRAG);
224 TX_STATUS_ENTRY(LIFE_EXPIRE);
225 TX_STATUS_ENTRY(DEST_PS);
226 TX_STATUS_ENTRY(ABORTED);
227 TX_STATUS_ENTRY(BT_RETRY);
228 TX_STATUS_ENTRY(STA_INVALID);
229 TX_STATUS_ENTRY(FRAG_DROPPED);
230 TX_STATUS_ENTRY(TID_DISABLE);
231 TX_STATUS_ENTRY(FRAME_FLUSHED);
232 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
233 TX_STATUS_ENTRY(TX_LOCKED);
234 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
235 }
236
237 return "UNKNOWN";
238}
239#else
240static inline const char *
241il3945_get_tx_fail_reason(u32 status)
242{
243 return "";
244}
245#endif
246
247/*
248 * Get the ieee prev rate from the rate scale table.
249 * For A and B mode we need to override the prev
250 * value.
251 */
252int
253il3945_rs_next_rate(struct il_priv *il, int rate)
254{
255 int next_rate = il3945_get_prev_ieee_rate(rate);
256
257 switch (il->band) {
258 case IEEE80211_BAND_5GHZ:
259 if (rate == RATE_12M_IDX)
260 next_rate = RATE_9M_IDX;
261 else if (rate == RATE_6M_IDX)
262 next_rate = RATE_6M_IDX;
263 break;
264 case IEEE80211_BAND_2GHZ:
265 if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
266 il_is_associated(il)) {
267 if (rate == RATE_11M_IDX)
268 next_rate = RATE_5M_IDX;
269 }
270 break;
271
272 default:
273 break;
274 }
275
276 return next_rate;
277}
278
279/**
280 * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
281 *
282 * When FW advances 'R' idx, all entries between old and new 'R' idx
283 * need to be reclaimed. As a result, some free space forms. If there is
284 * enough free space (> low mark), wake the stack that feeds us.
285 */
286static void
287il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
288{
289 struct il_tx_queue *txq = &il->txq[txq_id];
290 struct il_queue *q = &txq->q;
291 struct sk_buff *skb;
292
293 BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
294
295 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
296 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
297
298 skb = txq->skbs[txq->q.read_ptr];
299 ieee80211_tx_status_irqsafe(il->hw, skb);
300 txq->skbs[txq->q.read_ptr] = NULL;
301 il->ops->txq_free_tfd(il, txq);
302 }
303
304 if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
305 txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
306 il_wake_queue(il, txq);
307}
308
309/**
310 * il3945_hdl_tx - Handle Tx response
311 */
312static void
313il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
314{
315 struct il_rx_pkt *pkt = rxb_addr(rxb);
316 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
317 int txq_id = SEQ_TO_QUEUE(sequence);
318 int idx = SEQ_TO_IDX(sequence);
319 struct il_tx_queue *txq = &il->txq[txq_id];
320 struct ieee80211_tx_info *info;
321 struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
322 u32 status = le32_to_cpu(tx_resp->status);
323 int rate_idx;
324 int fail;
325
326 if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
327 IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
328 "is out of range [0-%d] %d %d\n", txq_id, idx,
329 txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
330 return;
331 }
332
333 /*
334 * Firmware will not transmit a frame on a passive channel if it has not
335 * yet received a valid frame on that channel. When this error happens
336 * we have to wait until the firmware unblocks itself, i.e. when we
337 * receive a beacon or some other frame. We unblock queues in
338 * il3945_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
339 */
340 if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
341 il->iw_mode == NL80211_IFTYPE_STATION) {
342 il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
343 D_INFO("Stopped queues - RX waiting on passive channel\n");
344 }
345
346 txq->time_stamp = jiffies;
347 info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
348 ieee80211_tx_info_clear_status(info);
349
350 /* Fill the MRR chain with some info about on-chip retransmissions */
351 rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
352 if (info->band == IEEE80211_BAND_5GHZ)
353 rate_idx -= IL_FIRST_OFDM_RATE;
354
355 fail = tx_resp->failure_frame;
356
357 info->status.rates[0].idx = rate_idx;
358 info->status.rates[0].count = fail + 1; /* add final attempt */
359
360 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
361 info->flags |=
362 ((status & TX_STATUS_MSK) ==
363 TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
364
365 D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
366 il3945_get_tx_fail_reason(status), status, tx_resp->rate,
367 tx_resp->failure_frame);
368
369 D_TX_REPLY("Tx queue reclaim %d\n", idx);
370 il3945_tx_queue_reclaim(il, txq_id, idx);
371
372 if (status & TX_ABORT_REQUIRED_MSK)
373 IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n");
374}
375
376/*****************************************************************************
377 *
378 * Intel PRO/Wireless 3945ABG/BG Network Connection
379 *
380 * RX handler implementations
381 *
382 *****************************************************************************/
383#ifdef CONFIG_IWLEGACY_DEBUGFS
384static void
385il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
386{
387 int i;
388 __le32 *prev_stats;
389 u32 *accum_stats;
390 u32 *delta, *max_delta;
391
392 prev_stats = (__le32 *) &il->_3945.stats;
393 accum_stats = (u32 *) &il->_3945.accum_stats;
394 delta = (u32 *) &il->_3945.delta_stats;
395 max_delta = (u32 *) &il->_3945.max_delta;
396
397 for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
398 i +=
399 sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
400 accum_stats++) {
401 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
402 *delta =
403 (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
404 *accum_stats += *delta;
405 if (*delta > *max_delta)
406 *max_delta = *delta;
407 }
408 }
409
410 /* reset accumulative stats for "no-counter" type stats */
411 il->_3945.accum_stats.general.temperature =
412 il->_3945.stats.general.temperature;
413 il->_3945.accum_stats.general.ttl_timestamp =
414 il->_3945.stats.general.ttl_timestamp;
415}
416#endif
417
418void
419il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
420{
421 struct il_rx_pkt *pkt = rxb_addr(rxb);
422
423 D_RX("Statistics notification received (%d vs %d).\n",
424 (int)sizeof(struct il3945_notif_stats),
425 le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
426#ifdef CONFIG_IWLEGACY_DEBUGFS
427 il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
428#endif
429
430 memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
431}
432
433void
434il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
435{
436 struct il_rx_pkt *pkt = rxb_addr(rxb);
437 __le32 *flag = (__le32 *) &pkt->u.raw;
438
439 if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
440#ifdef CONFIG_IWLEGACY_DEBUGFS
441 memset(&il->_3945.accum_stats, 0,
442 sizeof(struct il3945_notif_stats));
443 memset(&il->_3945.delta_stats, 0,
444 sizeof(struct il3945_notif_stats));
445 memset(&il->_3945.max_delta, 0,
446 sizeof(struct il3945_notif_stats));
447#endif
448 D_RX("Statistics have been cleared\n");
449 }
450 il3945_hdl_stats(il, rxb);
451}
452
453/******************************************************************************
454 *
455 * Misc. internal state and helper functions
456 *
457 ******************************************************************************/
458
459/* This is necessary only for a number of stats, see the caller. */
460static int
461il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
462{
463 /* Filter incoming packets to determine if they are targeted toward
464 * this network, discarding packets coming from ourselves */
465 switch (il->iw_mode) {
466 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
467 /* packets to our IBSS update information */
468 return ether_addr_equal_64bits(header->addr3, il->bssid);
469 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
470		/* packets from our AP (BSSID) update information */
471 return ether_addr_equal_64bits(header->addr2, il->bssid);
472 default:
473 return 1;
474 }
475}
476
477#define SMALL_PACKET_SIZE 256
478
479static void
480il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
481 struct ieee80211_rx_status *stats)
482{
483 struct il_rx_pkt *pkt = rxb_addr(rxb);
484 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
485 struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
486 struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
487 u32 len = le16_to_cpu(rx_hdr->len);
488 struct sk_buff *skb;
489 __le16 fc = hdr->frame_control;
490 u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;
491
492	/* Sanity check: the reported length must fit within the RX buffer */
493 if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) {
494 D_DROP("Corruption detected!\n");
495 return;
496 }
497
498 /* We only process data packets if the interface is open */
499 if (unlikely(!il->is_open)) {
500 D_DROP("Dropping packet while interface is not open.\n");
501 return;
502 }
503
504 if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
505 il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
506 D_INFO("Woke queues - frame received on passive channel\n");
507 }
508
509 skb = dev_alloc_skb(SMALL_PACKET_SIZE);
510 if (!skb) {
511 IL_ERR("dev_alloc_skb failed\n");
512 return;
513 }
514
515 if (!il3945_mod_params.sw_crypto)
516 il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
517 le32_to_cpu(rx_end->status), stats);
518
519 /* If frame is small enough to fit into skb->head, copy it
520 * and do not consume a full page
521 */
522 if (len <= SMALL_PACKET_SIZE) {
523 memcpy(skb_put(skb, len), rx_hdr->payload, len);
524 } else {
525 skb_add_rx_frag(skb, 0, rxb->page,
526 (void *)rx_hdr->payload - (void *)pkt, len,
527 fraglen);
528 il->alloc_rxb_page--;
529 rxb->page = NULL;
530 }
531 il_update_stats(il, false, fc, len);
532 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
533
534 ieee80211_rx(il->hw, skb);
535}
536
537#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
538
539static void
540il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
541{
542 struct ieee80211_hdr *header;
543 struct ieee80211_rx_status rx_status = {};
544 struct il_rx_pkt *pkt = rxb_addr(rxb);
545 struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
546 struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
547 struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
548 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
549 u16 rx_stats_noise_diff __maybe_unused =
550 le16_to_cpu(rx_stats->noise_diff);
551 u8 network_packet;
552
553 rx_status.flag = 0;
554 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
555	rx_status.band =
556	    (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
557	    IEEE80211_BAND_2GHZ :
558	    IEEE80211_BAND_5GHZ;
559 rx_status.freq =
560 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
561 rx_status.band);
562
563 rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
564 if (rx_status.band == IEEE80211_BAND_5GHZ)
565 rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
566
567 rx_status.antenna =
568 (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
569 4;
570
571 /* set the preamble flag if appropriate */
572 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
573 rx_status.flag |= RX_FLAG_SHORTPRE;
574
575 if ((unlikely(rx_stats->phy_count > 20))) {
576 D_DROP("dsp size out of range [0,20]: %d\n",
577 rx_stats->phy_count);
578 return;
579 }
580
581 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
582 !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
583 D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
584 return;
585 }
586
587 /* Convert 3945's rssi indicator to dBm */
588 rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
589
590 D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
591 rx_stats_sig_avg, rx_stats_noise_diff);
592
593 header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
594
595 network_packet = il3945_is_network_packet(il, header);
596
597 D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
598 network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
599 rx_status.signal, rx_status.signal, rx_status.rate_idx);
600
601 if (network_packet) {
602 il->_3945.last_beacon_time =
603 le32_to_cpu(rx_end->beacon_timestamp);
604 il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
605 il->_3945.last_rx_rssi = rx_status.signal;
606 }
607
608 il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
609}
610
611int
612il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
613 dma_addr_t addr, u16 len, u8 reset, u8 pad)
614{
615 int count;
616 struct il_queue *q;
617 struct il3945_tfd *tfd, *tfd_tmp;
618
619 q = &txq->q;
620 tfd_tmp = (struct il3945_tfd *)txq->tfds;
621 tfd = &tfd_tmp[q->write_ptr];
622
623 if (reset)
624 memset(tfd, 0, sizeof(*tfd));
625
626 count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
627
628 if (count >= NUM_TFD_CHUNKS || count < 0) {
629		IL_ERR("Error: cannot send more than %d chunks\n",
630 NUM_TFD_CHUNKS);
631 return -EINVAL;
632 }
633
634 tfd->tbs[count].addr = cpu_to_le32(addr);
635 tfd->tbs[count].len = cpu_to_le32(len);
636
637 count++;
638
639 tfd->control_flags =
640 cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
641
642 return 0;
643}
644
645/**
646 * il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr]
647 *
648 * Does NOT advance any idxes
649 */
650void
651il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
652{
653 struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
654 int idx = txq->q.read_ptr;
655 struct il3945_tfd *tfd = &tfd_tmp[idx];
656 struct pci_dev *dev = il->pci_dev;
657 int i;
658 int counter;
659
660 /* sanity check */
661 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
662 if (counter > NUM_TFD_CHUNKS) {
663 IL_ERR("Too many chunks: %i\n", counter);
664		/* @todo issue fatal error, this is quite a serious situation */
665 return;
666 }
667
668 /* Unmap tx_cmd */
669 if (counter)
670 pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
671 dma_unmap_len(&txq->meta[idx], len),
672 PCI_DMA_TODEVICE);
673
674 /* unmap chunks if any */
675
676 for (i = 1; i < counter; i++)
677 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
678 le32_to_cpu(tfd->tbs[i].len),
679 PCI_DMA_TODEVICE);
680
681 /* free SKB */
682 if (txq->skbs) {
683 struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
684
685 /* can be called from irqs-disabled context */
686 if (skb) {
687 dev_kfree_skb_any(skb);
688 txq->skbs[txq->q.read_ptr] = NULL;
689 }
690 }
691}
692
693/**
694 * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
695 *
696 */
697void
698il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
699 struct ieee80211_tx_info *info,
700 struct ieee80211_hdr *hdr, int sta_id)
701{
702 u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
703 u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1);
704 u16 rate_mask;
705 int rate;
706 const u8 rts_retry_limit = 7;
707 u8 data_retry_limit;
708 __le32 tx_flags;
709 __le16 fc = hdr->frame_control;
710 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
711
712 rate = il3945_rates[rate_idx].plcp;
713 tx_flags = tx_cmd->tx_flags;
714
715 /* We need to figure out how to get the sta->supp_rates while
716 * in this running context */
717 rate_mask = RATES_MASK_3945;
718
719 /* Set retry limit on DATA packets and Probe Responses */
720 if (ieee80211_is_probe_resp(fc))
721 data_retry_limit = 3;
722 else
723 data_retry_limit = IL_DEFAULT_TX_RETRY;
724 tx_cmd->data_retry_limit = data_retry_limit;
725 /* Set retry limit on RTS packets */
726 tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
727
728 tx_cmd->rate = rate;
729 tx_cmd->tx_flags = tx_flags;
730
731 /* OFDM */
732 tx_cmd->supp_rates[0] =
733 ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
734
735 /* CCK */
736 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
737
738 D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
739 "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
740 le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
741 tx_cmd->supp_rates[0]);
742}
743
744static u8
745il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
746{
747 unsigned long flags_spin;
748 struct il_station_entry *station;
749
750 if (sta_id == IL_INVALID_STATION)
751 return IL_INVALID_STATION;
752
753 spin_lock_irqsave(&il->sta_lock, flags_spin);
754 station = &il->stations[sta_id];
755
756 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
757 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
758 station->sta.mode = STA_CONTROL_MODIFY_MSK;
759 il_send_add_sta(il, &station->sta, CMD_ASYNC);
760 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
761
762 D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
763 return sta_id;
764}
765
766static void
767il3945_set_pwr_vmain(struct il_priv *il)
768{
769/*
770 * (for documentation purposes)
771 * to set power to V_AUX, do
772
773 if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
774 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
775 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
776 ~APMG_PS_CTRL_MSK_PWR_SRC);
777
778 _il_poll_bit(il, CSR_GPIO_IN,
779 CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
780 CSR_GPIO_IN_BIT_AUX_POWER, 5000);
781 }
782 */
783
784 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
785 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
786 ~APMG_PS_CTRL_MSK_PWR_SRC);
787
788 _il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
789 CSR_GPIO_IN_BIT_AUX_POWER, 5000);
790}
791
792static int
793il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
794{
795 il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
796 il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
797 il_wr(il, FH39_RCSR_WPTR(0), 0);
798	il_wr(il, FH39_RCSR_CONFIG(0),
799	      FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
800	      FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
801	      FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
802	      FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
803	      (RX_QUEUE_SIZE_LOG <<
804	       FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
805	      FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
806	      (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
807	      FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
808
809 /* fake read to flush all prev I/O */
810 il_rd(il, FH39_RSSR_CTRL);
811
812 return 0;
813}
814
815static int
816il3945_tx_reset(struct il_priv *il)
817{
818 /* bypass mode */
819 il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
820
821 /* RA 0 is active */
822 il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);
823
824 /* all 6 fifo are active */
825 il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);
826
827 il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
828 il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
829 il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
830 il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);
831
832 il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);
833
834 il_wr(il, FH39_TSSR_MSG_CONFIG,
835 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
836 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
837 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
838 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
839 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
840 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
841 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
842
843 return 0;
844}
845
846/**
847 * il3945_txq_ctx_reset - Reset TX queue context
848 *
849 * Destroys all DMA structures and initializes them again
850 */
851static int
852il3945_txq_ctx_reset(struct il_priv *il)
853{
854 int rc, txq_id;
855
856 il3945_hw_txq_ctx_free(il);
857
858 /* allocate tx queue structure */
859 rc = il_alloc_txq_mem(il);
860 if (rc)
861 return rc;
862
863 /* Tx CMD queue */
864 rc = il3945_tx_reset(il);
865 if (rc)
866 goto error;
867
868 /* Tx queue(s) */
869 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
870 rc = il_tx_queue_init(il, txq_id);
871 if (rc) {
872 IL_ERR("Tx %d queue init failed\n", txq_id);
873 goto error;
874 }
875 }
876
877 return rc;
878
879error:
880 il3945_hw_txq_ctx_free(il);
881 return rc;
882}
883
884/*
885 * Start up 3945's basic functionality after it has been reset
886 * (e.g. after platform boot, or shutdown via il_apm_stop())
887 * NOTE: This does not load uCode nor start the embedded processor
888 */
889static int
890il3945_apm_init(struct il_priv *il)
891{
892 int ret = il_apm_init(il);
893
894 /* Clear APMG (NIC's internal power management) interrupts */
895 il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
896 il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
897
898 /* Reset radio chip */
899 il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
900 udelay(5);
901 il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
902
903 return ret;
904}
905
906static void
907il3945_nic_config(struct il_priv *il)
908{
909 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
910 unsigned long flags;
911 u8 rev_id = il->pci_dev->revision;
912
913 spin_lock_irqsave(&il->lock, flags);
914
915 /* Determine HW type */
916 D_INFO("HW Revision ID = 0x%X\n", rev_id);
917
918 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
919 D_INFO("RTP type\n");
920 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
921 D_INFO("3945 RADIO-MB type\n");
922 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
923 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
924 } else {
925 D_INFO("3945 RADIO-MM type\n");
926 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
927 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
928 }
929
930 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
931 D_INFO("SKU OP mode is mrc\n");
932 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
933 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
934 } else
935 D_INFO("SKU OP mode is basic\n");
936
937 if ((eeprom->board_revision & 0xF0) == 0xD0) {
938 D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
939 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
940 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
941 } else {
942 D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
943 il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
944 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
945 }
946
947 if (eeprom->almgor_m_version <= 1) {
948 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
949 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
950 D_INFO("Card M type A version is 0x%X\n",
951 eeprom->almgor_m_version);
952 } else {
953 D_INFO("Card M type B version is 0x%X\n",
954 eeprom->almgor_m_version);
955 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
956 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
957 }
958 spin_unlock_irqrestore(&il->lock, flags);
959
960 if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
961 D_RF_KILL("SW RF KILL supported in EEPROM.\n");
962
963 if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
964 D_RF_KILL("HW RF KILL supported in EEPROM.\n");
965}
966
967int
968il3945_hw_nic_init(struct il_priv *il)
969{
970 int rc;
971 unsigned long flags;
972 struct il_rx_queue *rxq = &il->rxq;
973
974 spin_lock_irqsave(&il->lock, flags);
975 il3945_apm_init(il);
976 spin_unlock_irqrestore(&il->lock, flags);
977
978 il3945_set_pwr_vmain(il);
979 il3945_nic_config(il);
980
981 /* Allocate the RX queue, or reset if it is already allocated */
982 if (!rxq->bd) {
983 rc = il_rx_queue_alloc(il);
984 if (rc) {
985 IL_ERR("Unable to initialize Rx queue\n");
986 return -ENOMEM;
987 }
988 } else
989 il3945_rx_queue_reset(il, rxq);
990
991 il3945_rx_replenish(il);
992
993 il3945_rx_init(il, rxq);
994
995 /* Look at using this instead:
996 rxq->need_update = 1;
997 il_rx_queue_update_write_ptr(il, rxq);
998 */
999
1000 il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
1001
1002 rc = il3945_txq_ctx_reset(il);
1003 if (rc)
1004 return rc;
1005
1006 set_bit(S_INIT, &il->status);
1007
1008 return 0;
1009}
1010
1011/**
1012 * il3945_hw_txq_ctx_free - Free TXQ Context
1013 *
1014 * Destroy all TX DMA queues and structures
1015 */
1016void
1017il3945_hw_txq_ctx_free(struct il_priv *il)
1018{
1019 int txq_id;
1020
1021 /* Tx queues */
1022 if (il->txq)
1023 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1024 if (txq_id == IL39_CMD_QUEUE_NUM)
1025 il_cmd_queue_free(il);
1026 else
1027 il_tx_queue_free(il, txq_id);
1028
1029 /* free tx queue structure */
1030 il_free_txq_mem(il);
1031}
1032
1033void
1034il3945_hw_txq_ctx_stop(struct il_priv *il)
1035{
1036 int txq_id;
1037
1038 /* stop SCD */
1039 _il_wr_prph(il, ALM_SCD_MODE_REG, 0);
1040 _il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
1041
1042 /* reset TFD queues */
1043 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
1044 _il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
1045 _il_poll_bit(il, FH39_TSSR_TX_STATUS,
1046 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1047 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1048 1000);
1049 }
1050}
1051
1052/**
1053 * il3945_hw_reg_adjust_power_by_temp
1054 * Return the idx delta into the power gain settings table.
1055 */
1056static int
1057il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1058{
1059 return (new_reading - old_reading) * (-11) / 100;
1060}
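/*
 * Editorial worked example with hypothetical readings: if the current raw
 * temperature reading is 100 units above the reference,
 *
 *	delta = (new_reading - old_reading) * (-11) / 100
 *	      = 100 * (-11) / 100 = -11,
 *
 * i.e. the power idx moves 11 steps toward entry 0 of the gain table
 * below (more output) to compensate; a reading 100 units colder gives
 * +11 (less output). Roughly one half-dB table step per ~9 raw units.
 */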
1061
1062/**
1063 * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
1064 */
1065static inline int
1066il3945_hw_reg_temp_out_of_range(int temperature)
1067{
1068 return (temperature < -260 || temperature > 25) ? 1 : 0;
1069}
1070
1071int
1072il3945_hw_get_temperature(struct il_priv *il)
1073{
1074 return _il_rd(il, CSR_UCODE_DRV_GP2);
1075}
1076
1077/**
1078 * il3945_hw_reg_txpower_get_temperature
1079 * Get the current temperature by reading from the NIC.
1080 */
1081static int
1082il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
1083{
1084 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1085 int temperature;
1086
1087 temperature = il3945_hw_get_temperature(il);
1088
1089 /* driver's okay range is -260 to +25.
1090 * human readable okay range is 0 to +285 */
1091 D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
1092
1093 /* handle insane temp reading */
1094 if (il3945_hw_reg_temp_out_of_range(temperature)) {
1095 IL_ERR("Error bad temperature value %d\n", temperature);
1096
1097 /* if really really hot(?),
1098 * substitute the 3rd band/group's temp measured at factory */
1099 if (il->last_temperature > 100)
1100 temperature = eeprom->groups[2].temperature;
1101 else /* else use most recent "sane" value from driver */
1102 temperature = il->last_temperature;
1103 }
1104
1105 return temperature; /* raw, not "human readable" */
1106}
1107
1108/* Adjust Txpower only if temperature variance is greater than threshold.
1109 *
1110 * This threshold is lower than older versions' 9 degrees */
1111#define IL_TEMPERATURE_LIMIT_TIMER 6
1112
1113/**
1114 * il3945_is_temp_calib_needed - determines if new calibration is needed
1115 *
1116 * Records the new temperature in il->temperature.
1117 * Replaces il->last_temperature *only* if calib is needed
1118 * (assumes caller will actually do the calibration!). */
1119static int
1120il3945_is_temp_calib_needed(struct il_priv *il)
1121{
1122 int temp_diff;
1123
1124 il->temperature = il3945_hw_reg_txpower_get_temperature(il);
1125 temp_diff = il->temperature - il->last_temperature;
1126
1127 /* get absolute value */
1128 if (temp_diff < 0) {
1129 D_POWER("Getting cooler, delta %d,\n", temp_diff);
1130 temp_diff = -temp_diff;
1131 } else if (temp_diff == 0)
1132 D_POWER("Same temp,\n");
1133 else
1134 D_POWER("Getting warmer, delta %d,\n", temp_diff);
1135
1136 /* if we don't need calibration, *don't* update last_temperature */
1137 if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
1138 D_POWER("Timed thermal calib not needed\n");
1139 return 0;
1140 }
1141
1142 D_POWER("Timed thermal calib needed\n");
1143
1144 /* assume that caller will actually do calib ...
1145 * update the "last temperature" value */
1146 il->last_temperature = il->temperature;
1147 return 1;
1148}
1149
1150#define IL_MAX_GAIN_ENTRIES 78
1151#define IL_CCK_FROM_OFDM_POWER_DIFF -5
1152#define IL_CCK_FROM_OFDM_IDX_DIFF (10)
1153
1154/* radio and DSP power table, each step is 1/2 dB.
1155 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
1156static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
1157 {
1158 {251, 127}, /* 2.4 GHz, highest power */
1159 {251, 127},
1160 {251, 127},
1161 {251, 127},
1162 {251, 125},
1163 {251, 110},
1164 {251, 105},
1165 {251, 98},
1166 {187, 125},
1167 {187, 115},
1168 {187, 108},
1169 {187, 99},
1170 {243, 119},
1171 {243, 111},
1172 {243, 105},
1173 {243, 97},
1174 {243, 92},
1175 {211, 106},
1176 {211, 100},
1177 {179, 120},
1178 {179, 113},
1179 {179, 107},
1180 {147, 125},
1181 {147, 119},
1182 {147, 112},
1183 {147, 106},
1184 {147, 101},
1185 {147, 97},
1186 {147, 91},
1187 {115, 107},
1188 {235, 121},
1189 {235, 115},
1190 {235, 109},
1191 {203, 127},
1192 {203, 121},
1193 {203, 115},
1194 {203, 108},
1195 {203, 102},
1196 {203, 96},
1197 {203, 92},
1198 {171, 110},
1199 {171, 104},
1200 {171, 98},
1201 {139, 116},
1202 {227, 125},
1203 {227, 119},
1204 {227, 113},
1205 {227, 107},
1206 {227, 101},
1207 {227, 96},
1208 {195, 113},
1209 {195, 106},
1210 {195, 102},
1211 {195, 95},
1212 {163, 113},
1213 {163, 106},
1214 {163, 102},
1215 {163, 95},
1216 {131, 113},
1217 {131, 106},
1218 {131, 102},
1219 {131, 95},
1220 {99, 113},
1221 {99, 106},
1222 {99, 102},
1223 {99, 95},
1224 {67, 113},
1225 {67, 106},
1226 {67, 102},
1227 {67, 95},
1228 {35, 113},
1229 {35, 106},
1230 {35, 102},
1231 {35, 95},
1232 {3, 113},
1233 {3, 106},
1234 {3, 102},
1235 {3, 95} /* 2.4 GHz, lowest power */
1236 },
1237 {
1238 {251, 127}, /* 5.x GHz, highest power */
1239 {251, 120},
1240 {251, 114},
1241 {219, 119},
1242 {219, 101},
1243 {187, 113},
1244 {187, 102},
1245 {155, 114},
1246 {155, 103},
1247 {123, 117},
1248 {123, 107},
1249 {123, 99},
1250 {123, 92},
1251 {91, 108},
1252 {59, 125},
1253 {59, 118},
1254 {59, 109},
1255 {59, 102},
1256 {59, 96},
1257 {59, 90},
1258 {27, 104},
1259 {27, 98},
1260 {27, 92},
1261 {115, 118},
1262 {115, 111},
1263 {115, 104},
1264 {83, 126},
1265 {83, 121},
1266 {83, 113},
1267 {83, 105},
1268 {83, 99},
1269 {51, 118},
1270 {51, 111},
1271 {51, 104},
1272 {51, 98},
1273 {19, 116},
1274 {19, 109},
1275 {19, 102},
1276 {19, 98},
1277 {19, 93},
1278 {171, 113},
1279 {171, 107},
1280 {171, 99},
1281 {139, 120},
1282 {139, 113},
1283 {139, 107},
1284 {139, 99},
1285 {107, 120},
1286 {107, 113},
1287 {107, 107},
1288 {107, 99},
1289 {75, 120},
1290 {75, 113},
1291 {75, 107},
1292 {75, 99},
1293 {43, 120},
1294 {43, 113},
1295 {43, 107},
1296 {43, 99},
1297 {11, 120},
1298 {11, 113},
1299 {11, 107},
1300 {11, 99},
1301 {131, 107},
1302 {131, 99},
1303 {99, 120},
1304 {99, 113},
1305 {99, 107},
1306 {99, 99},
1307 {67, 120},
1308 {67, 113},
1309 {67, 107},
1310 {67, 99},
1311 {35, 120},
1312 {35, 113},
1313 {35, 107},
1314 {35, 99},
1315 {3, 120} /* 5.x GHz, lowest power */
1316 }
1317};
1318
1319static inline u8
1320il3945_hw_reg_fix_power_idx(int idx)
1321{
1322 if (idx < 0)
1323 return 0;
1324 if (idx >= IL_MAX_GAIN_ENTRIES)
1325 return IL_MAX_GAIN_ENTRIES - 1;
1326 return (u8) idx;
1327}
1328
1329/* Kick off thermal recalibration check every 60 seconds */
1330#define REG_RECALIB_PERIOD (60)
1331
1332/**
1333 * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
1334 *
1335 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1336 * or 6 Mbit (OFDM) rates.
1337 */
1338static void
1339il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
1340 const s8 *clip_pwrs,
1341 struct il_channel_info *ch_info, int band_idx)
1342{
1343 struct il3945_scan_power_info *scan_power_info;
1344 s8 power;
1345 u8 power_idx;
1346
1347 scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
1348
1349 /* use this channel group's 6Mbit clipping/saturation pwr,
1350 * but cap at regulatory scan power restriction (set during init
1351 * based on eeprom channel data) for this channel. */
1352 power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
1353
1354 power = min(power, il->tx_power_user_lmt);
1355 scan_power_info->requested_power = power;
1356
1357 /* find difference between new scan *power* and current "normal"
1358 * Tx *power* for 6Mb. Use this difference (x2) to adjust the
1359 * current "normal" temperature-compensated Tx power *idx* for
1360 * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
1361 * *idx*. */
1362	power_idx =
1363	    ch_info->power_info[rate_idx].power_table_idx -
1364	    (power -
1365	     ch_info->power_info[RATE_6M_IDX_TBL].requested_power) * 2;
1369
1370 /* store reference idx that we use when adjusting *all* scan
1371 * powers. So we can accommodate user (all channel) or spectrum
1372 * management (single channel) power changes "between" temperature
1373 * feedback compensation procedures.
1374 * don't force fit this reference idx into gain table; it may be a
1375 * negative number. This will help avoid errors when we're at
1376 * the lower bounds (highest gains, for warmest temperatures)
1377 * of the table. */
1378
1379 /* don't exceed table bounds for "real" setting */
1380 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1381
1382 scan_power_info->power_table_idx = power_idx;
1383 scan_power_info->tpc.tx_gain =
1384 power_gain_table[band_idx][power_idx].tx_gain;
1385 scan_power_info->tpc.dsp_atten =
1386 power_gain_table[band_idx][power_idx].dsp_atten;
1387}
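/*
 * Editorial example with hypothetical numbers: the gain table steps are
 * 1/2 dB each, so a dBm difference is doubled to get idx steps. If the
 * 6 Mbit "normal" setting is 14 dBm at power_table_idx 36 and the capped
 * scan power works out to 11 dBm, then
 *
 *	power_idx = 36 - (11 - 14) * 2 = 42,
 *
 * i.e. 6 steps (3 dB) less gain is used for scan probes on this channel.
 */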
1388
1389/**
1390 * il3945_send_tx_power - fill in Tx Power command with gain settings
1391 *
1392 * Configures power settings for all rates for the current channel,
1393 * using values from channel info struct, and send to NIC
1394 */
1395static int
1396il3945_send_tx_power(struct il_priv *il)
1397{
1398 int rate_idx, i;
1399 const struct il_channel_info *ch_info = NULL;
1400 struct il3945_txpowertable_cmd txpower = {
1401 .channel = il->active.channel,
1402 };
1403 u16 chan;
1404
1405 if (WARN_ONCE
1406 (test_bit(S_SCAN_HW, &il->status),
1407 "TX Power requested while scanning!\n"))
1408 return -EAGAIN;
1409
1410 chan = le16_to_cpu(il->active.channel);
1411
1412 txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1413 ch_info = il_get_channel_info(il, il->band, chan);
1414 if (!ch_info) {
1415 IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
1416 il->band);
1417 return -EINVAL;
1418 }
1419
1420 if (!il_is_channel_valid(ch_info)) {
1421 D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n");
1422 return 0;
1423 }
1424
1425 /* fill cmd with power settings for all rates for current channel */
1426 /* Fill OFDM rate */
1427 for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
1428 rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
1429
1430 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1431 txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1432
1433 D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1434 le16_to_cpu(txpower.channel), txpower.band,
1435 txpower.power[i].tpc.tx_gain,
1436 txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1437 }
1438 /* Fill CCK rates */
1439 for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
1440 rate_idx++, i++) {
1441 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1442 txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1443
1444 D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1445 le16_to_cpu(txpower.channel), txpower.band,
1446 txpower.power[i].tpc.tx_gain,
1447 txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1448 }
1449
1450 return il_send_cmd_pdu(il, C_TX_PWR_TBL,
1451 sizeof(struct il3945_txpowertable_cmd),
1452 &txpower);
1453
1454}
1455
1456/**
1457 * il3945_hw_reg_set_new_power - Configures power tables at new levels
1458 * @ch_info: Channel to update. Uses power_info.requested_power.
1459 *
1460 * Replace requested_power and base_power_idx ch_info fields for
1461 * one channel.
1462 *
1463 * Called if user or spectrum management changes power preferences.
1464 * Takes into account h/w and modulation limitations (clip power).
1465 *
1466 * This does *not* send anything to NIC, just sets up ch_info for one channel.
1467 *
1468 * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
1469 * properly fill out the scan powers, and actual h/w gain settings,
1470 * and send changes to NIC
1471 */
1472static int
1473il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
1474{
1475 struct il3945_channel_power_info *power_info;
1476 int power_changed = 0;
1477 int i;
1478 const s8 *clip_pwrs;
1479 int power;
1480
1481 /* Get this chnlgrp's rate-to-max/clip-powers table */
1482 clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1483
1484 /* Get this channel's rate-to-current-power settings table */
1485 power_info = ch_info->power_info;
1486
1487 /* update OFDM Txpower settings */
1488 for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
1489 int delta_idx;
1490
1491 /* limit new power to be no more than h/w capability */
1492 power = min(ch_info->curr_txpow, clip_pwrs[i]);
1493 if (power == power_info->requested_power)
1494 continue;
1495
1496 /* find difference between old and new requested powers,
1497 * update base (non-temp-compensated) power idx */
1498 delta_idx = (power - power_info->requested_power) * 2;
1499 power_info->base_power_idx -= delta_idx;
1500
1501 /* save new requested power value */
1502 power_info->requested_power = power;
1503
1504 power_changed = 1;
1505 }
1506
1507 /* update CCK Txpower settings, based on OFDM 12M setting ...
1508 * ... all CCK power settings for a given channel are the *same*. */
1509 if (power_changed) {
1510 power =
1511 ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
1512 IL_CCK_FROM_OFDM_POWER_DIFF;
1513
1514 /* do all CCK rates' il3945_channel_power_info structures */
1515 for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
1516 power_info->requested_power = power;
1517 power_info->base_power_idx =
1518 ch_info->power_info[RATE_12M_IDX_TBL].
1519 base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
1520 ++power_info;
1521 }
1522 }
1523
1524 return 0;
1525}
1526
1527/**
1528 * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1529 *
1530 * NOTE: Returned power limit may be less (but not more) than requested,
1531 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1532 * (no consideration for h/w clipping limitations).
1533 */
1534static int
1535il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
1536{
1537 s8 max_power;
1538
1539#if 0
1540 /* if we're using TGd limits, use lower of TGd or EEPROM */
1541 if (ch_info->tgd_data.max_power != 0)
1542 max_power =
1543 min(ch_info->tgd_data.max_power,
1544 ch_info->eeprom.max_power_avg);
1545
1546 /* else just use EEPROM limits */
1547 else
1548#endif
1549 max_power = ch_info->eeprom.max_power_avg;
1550
1551 return min(max_power, ch_info->max_power_avg);
1552}
1553
1554/**
1555 * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
1556 *
1557 * Compensate txpower settings of *all* channels for temperature.
1558 * This only accounts for the difference between current temperature
1559 * and the factory calibration temperatures, and bases the new settings
1560 * on the channel's base_power_idx.
1561 *
1562 * If RxOn is "associated", this sends the new Txpower to NIC!
1563 */
1564static int
1565il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
1566{
1567 struct il_channel_info *ch_info = NULL;
1568 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1569 int delta_idx;
1570 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1571 u8 a_band;
1572 u8 rate_idx;
1573 u8 scan_tbl_idx;
1574 u8 i;
1575 int ref_temp;
1576 int temperature = il->temperature;
1577
1578 if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
1579 /* do not perform tx power calibration */
1580 return 0;
1581 }
1582 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1583 for (i = 0; i < il->channel_count; i++) {
1584 ch_info = &il->channel_info[i];
1585 a_band = il_is_channel_a_band(ch_info);
1586
1587 /* Get this chnlgrp's factory calibration temperature */
1588 ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
1589
1590 /* get power idx adjustment based on current and factory
1591 * temps */
1592 delta_idx =
1593 il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
1594
1595 /* set tx power value for all rates, OFDM and CCK */
1596 for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
1597 int power_idx =
1598 ch_info->power_info[rate_idx].base_power_idx;
1599
1600 /* temperature compensate */
1601 power_idx += delta_idx;
1602
1603 /* stay within table range */
1604 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1605 ch_info->power_info[rate_idx].power_table_idx =
1606 (u8) power_idx;
1607 ch_info->power_info[rate_idx].tpc =
1608 power_gain_table[a_band][power_idx];
1609 }
1610
1611 /* Get this chnlgrp's rate-to-max/clip-powers table */
1612 clip_pwrs =
1613 il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1614
1615 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1616 for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
1617 scan_tbl_idx++) {
1618 s32 actual_idx =
1619 (scan_tbl_idx ==
1620 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
1621 il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
1622 actual_idx, clip_pwrs,
1623 ch_info, a_band);
1624 }
1625 }
1626
1627 /* send Txpower command for current channel to ucode */
1628 return il->ops->send_tx_power(il);
1629}
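/*
 * Editorial summary of the compensation above: for every channel and rate
 *
 *	power_table_idx = clamp(base_power_idx +
 *				(T_now - T_factory) * (-11) / 100)
 *
 * where the clamp is il3945_hw_reg_fix_power_idx(), and the 1M/6M scan
 * powers are then re-derived from the freshly compensated idxes.
 */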
1630
1631int
1632il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
1633{
1634 struct il_channel_info *ch_info;
1635 s8 max_power;
1636 u8 a_band;
1637 u8 i;
1638
1639 if (il->tx_power_user_lmt == power) {
1640 D_POWER("Requested Tx power same as current " "limit: %ddBm.\n",
1641 power);
1642 return 0;
1643 }
1644
1645 D_POWER("Setting upper limit clamp to %ddBm.\n", power);
1646 il->tx_power_user_lmt = power;
1647
1648 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1649
1650 for (i = 0; i < il->channel_count; i++) {
1651 ch_info = &il->channel_info[i];
1652 a_band = il_is_channel_a_band(ch_info);
1653
1654 /* find minimum power of all user and regulatory constraints
1655 * (does not consider h/w clipping limitations) */
1656 max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
1657 max_power = min(power, max_power);
1658 if (max_power != ch_info->curr_txpow) {
1659 ch_info->curr_txpow = max_power;
1660
1661 /* this considers the h/w clipping limitations */
1662 il3945_hw_reg_set_new_power(il, ch_info);
1663 }
1664 }
1665
1666 /* update txpower settings for all channels,
1667 * send to NIC if associated. */
1668 il3945_is_temp_calib_needed(il);
1669 il3945_hw_reg_comp_txpower_temp(il);
1670
1671 return 0;
1672}
1673
1674static int
1675il3945_send_rxon_assoc(struct il_priv *il)
1676{
1677 int rc = 0;
1678 struct il_rx_pkt *pkt;
1679 struct il3945_rxon_assoc_cmd rxon_assoc;
1680 struct il_host_cmd cmd = {
1681 .id = C_RXON_ASSOC,
1682 .len = sizeof(rxon_assoc),
1683 .flags = CMD_WANT_SKB,
1684 .data = &rxon_assoc,
1685 };
1686 const struct il_rxon_cmd *rxon1 = &il->staging;
1687 const struct il_rxon_cmd *rxon2 = &il->active;
1688
1689 if (rxon1->flags == rxon2->flags &&
1690 rxon1->filter_flags == rxon2->filter_flags &&
1691 rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
1692 rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
1693 D_INFO("Using current RXON_ASSOC. Not resending.\n");
1694 return 0;
1695 }
1696
1697 rxon_assoc.flags = il->staging.flags;
1698 rxon_assoc.filter_flags = il->staging.filter_flags;
1699 rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates;
1700 rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates;
1701 rxon_assoc.reserved = 0;
1702
1703 rc = il_send_cmd_sync(il, &cmd);
1704 if (rc)
1705 return rc;
1706
1707 pkt = (struct il_rx_pkt *)cmd.reply_page;
1708 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
1709 IL_ERR("Bad return from C_RXON_ASSOC command\n");
1710 rc = -EIO;
1711 }
1712
1713 il_free_pages(il, cmd.reply_page);
1714
1715 return rc;
1716}
1717
1718/**
1719 * il3945_commit_rxon - commit staging_rxon to hardware
1720 *
1721 * The RXON command in staging_rxon is committed to the hardware and
1722 * the active_rxon structure is updated with the new data. This
1723 * function correctly transitions out of the RXON_ASSOC_MSK state if
1724 * a HW tune is required based on the RXON structure changes.
1725 */
1726int
1727il3945_commit_rxon(struct il_priv *il)
1728{
1729 /* cast away the const for active_rxon in this function */
1730 struct il3945_rxon_cmd *active_rxon = (void *)&il->active;
1731 struct il3945_rxon_cmd *staging_rxon = (void *)&il->staging;
1732 int rc = 0;
1733 bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
1734
1735 if (test_bit(S_EXIT_PENDING, &il->status))
1736 return -EINVAL;
1737
1738 if (!il_is_alive(il))
1739 return -1;
1740
1741 /* always get timestamp with Rx frame */
1742 staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
1743
1744 /* select antenna */
1745 staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1746 staging_rxon->flags |= il3945_get_antenna_flags(il);
1747
1748 rc = il_check_rxon_cmd(il);
1749 if (rc) {
1750 IL_ERR("Invalid RXON configuration. Not committing.\n");
1751 return -EINVAL;
1752 }
1753
1754 /* If we don't need to send a full RXON, we can use
1755 * il3945_rxon_assoc_cmd which is used to reconfigure filter
1756 * and other flags for the current radio configuration. */
1757 if (!il_full_rxon_required(il)) {
1758 rc = il_send_rxon_assoc(il);
1759 if (rc) {
1760 IL_ERR("Error setting RXON_ASSOC "
1761 "configuration (%d).\n", rc);
1762 return rc;
1763 }
1764
1765 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1766 /*
1767 * We do not commit tx power settings while channel changing,
1768 * do it now if tx power changed.
1769 */
1770 il_set_tx_power(il, il->tx_power_next, false);
1771 return 0;
1772 }
1773
1774 /* If we are currently associated and the new config requires
1775 * an RXON_ASSOC and the new config wants the associated mask enabled,
1776	 * we must clear the associated bit from the active configuration
1777 * before we apply the new config */
1778 if (il_is_associated(il) && new_assoc) {
1779 D_INFO("Toggling associated bit on current RXON\n");
1780 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1781
1782 /*
1783 * reserved4 and 5 could have been filled by the iwlcore code.
1784 * Let's clear them before pushing to the 3945.
1785 */
1786 active_rxon->reserved4 = 0;
1787 active_rxon->reserved5 = 0;
1788 rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
1789 &il->active);
1790
1791 /* If the mask clearing failed then we set
1792 * active_rxon back to what it was previously */
1793 if (rc) {
1794 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1795 IL_ERR("Error clearing ASSOC_MSK on current "
1796 "configuration (%d).\n", rc);
1797 return rc;
1798 }
1799 il_clear_ucode_stations(il);
1800 il_restore_stations(il);
1801 }
1802
1803 D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
1804 "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
1805 le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);
1806
1807 /*
1808 * reserved4 and 5 could have been filled by the iwlcore code.
1809 * Let's clear them before pushing to the 3945.
1810 */
1811 staging_rxon->reserved4 = 0;
1812 staging_rxon->reserved5 = 0;
1813
1814 il_set_rxon_hwcrypto(il, !il3945_mod_params.sw_crypto);
1815
1816 /* Apply the new configuration */
1817 rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
1818 staging_rxon);
1819 if (rc) {
1820 IL_ERR("Error setting new configuration (%d).\n", rc);
1821 return rc;
1822 }
1823
1824 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1825
1826 if (!new_assoc) {
1827 il_clear_ucode_stations(il);
1828 il_restore_stations(il);
1829 }
1830
1831 /* If we issue a new RXON command which required a tune then we must
1832 * send a new TXPOWER command or we won't be able to Tx any frames */
1833 rc = il_set_tx_power(il, il->tx_power_next, true);
1834 if (rc) {
1835 IL_ERR("Error setting Tx power (%d).\n", rc);
1836 return rc;
1837 }
1838
1839 /* Init the hardware's rate fallback order based on the band */
1840 rc = il3945_init_hw_rate_table(il);
1841 if (rc) {
1842 IL_ERR("Error setting HW rate table: %02X\n", rc);
1843 return -EIO;
1844 }
1845
1846 return 0;
1847}
1848
1849/**
1850 * il3945_reg_txpower_periodic - called when time to check our temperature.
1851 *
1852 * -- reset periodic timer
1853 * -- see if temp has changed enough to warrant re-calibration ... if so:
1854 * -- correct coeffs for temp (can reset temp timer)
1855 * -- save this temp as "last",
1856 * -- send new set of gain settings to NIC
1857 * NOTE: This should continue working, even when we're not associated,
1858 * so we can keep our internal table of scan powers current. */
1859void
1860il3945_reg_txpower_periodic(struct il_priv *il)
1861{
1862 /* This will kick in the "brute force"
1863 * il3945_hw_reg_comp_txpower_temp() below */
1864 if (!il3945_is_temp_calib_needed(il))
1865 goto reschedule;
1866
1867 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1868 * This is based *only* on current temperature,
1869 * ignoring any previous power measurements */
1870 il3945_hw_reg_comp_txpower_temp(il);
1871
1872reschedule:
1873 queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
1874 REG_RECALIB_PERIOD * HZ);
1875}
1876
1877static void
1878il3945_bg_reg_txpower_periodic(struct work_struct *work)
1879{
1880 struct il_priv *il = container_of(work, struct il_priv,
1881 _3945.thermal_periodic.work);
1882
1883 mutex_lock(&il->mutex);
1884 if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
1885 goto out;
1886
1887 il3945_reg_txpower_periodic(il);
1888out:
1889 mutex_unlock(&il->mutex);
1890}
1891
1892/**
1893 * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
1894 *
1895 * This function is used when initializing channel-info structs.
1896 *
1897 * NOTE: These channel groups do *NOT* match the bands above!
1898 * These channel groups are based on factory-tested channels;
1899 * on A-band, EEPROM's "group frequency" entries represent the top
1900 *	 channel in each group 1-4.  All B/G channels are in group 0.
1901 */
1902static u16
1903il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
1904 const struct il_channel_info *ch_info)
1905{
1906 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1907 struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1908 u8 group;
1909 u16 group_idx = 0; /* based on factory calib frequencies */
1910 u8 grp_channel;
1911
1912 /* Find the group idx for the channel ... don't use idx 1(?) */
1913 if (il_is_channel_a_band(ch_info)) {
1914 for (group = 1; group < 5; group++) {
1915 grp_channel = ch_grp[group].group_channel;
1916 if (ch_info->channel <= grp_channel) {
1917 group_idx = group;
1918 break;
1919 }
1920 }
1921 /* group 4 has a few channels *above* its factory cal freq */
1922 if (group == 5)
1923 group_idx = 4;
1924 } else
1925 group_idx = 0; /* 2.4 GHz, group 0 */
1926
1927 D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
1928 return group_idx;
1929}
1930
1931/**
1932 * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
1933 *
1934 * Interpolate to get nominal (i.e. at factory calibration temperature) idx
1935 * into radio/DSP gain settings table for requested power.
1936 */
1937static int
1938il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
1939 s32 setting_idx, s32 *new_idx)
1940{
1941 const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
1942 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1943 s32 idx0, idx1;
1944 s32 power = 2 * requested_power;
1945 s32 i;
1946 const struct il3945_eeprom_txpower_sample *samples;
1947 s32 gains0, gains1;
1948 s32 res;
1949 s32 denominator;
1950
1951 chnl_grp = &eeprom->groups[setting_idx];
1952 samples = chnl_grp->samples;
1953 for (i = 0; i < 5; i++) {
1954 if (power == samples[i].power) {
1955 *new_idx = samples[i].gain_idx;
1956 return 0;
1957 }
1958 }
1959
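	/* The five EEPROM samples are stored in order of decreasing power;
	 * pick the pair that brackets the requested power so we can
	 * interpolate between their gain idxs below. */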
1960 if (power > samples[1].power) {
1961 idx0 = 0;
1962 idx1 = 1;
1963 } else if (power > samples[2].power) {
1964 idx0 = 1;
1965 idx1 = 2;
1966 } else if (power > samples[3].power) {
1967 idx0 = 2;
1968 idx1 = 3;
1969 } else {
1970 idx0 = 3;
1971 idx1 = 4;
1972 }
1973
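	/* Linear interpolation between the two bracketing samples, done in
	 * 2^19 fixed point; the (1 << 18) term rounds to the nearest idx
	 * before the final shift. */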
1974 denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
1975 if (denominator == 0)
1976 return -EINVAL;
1977 gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
1978 gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
1979 res =
1980 gains0 + (gains1 - gains0) * ((s32) power -
1981 (s32) samples[idx0].power) /
1982 denominator + (1 << 18);
1983 *new_idx = res >> 19;
1984 return 0;
1985}
1986
1987static void
1988il3945_hw_reg_init_channel_groups(struct il_priv *il)
1989{
1990 u32 i;
1991 s32 rate_idx;
1992 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1993 const struct il3945_eeprom_txpower_group *group;
1994
1995 D_POWER("Initializing factory calib info from EEPROM\n");
1996
1997 for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
1998 s8 *clip_pwrs; /* table of power levels for each rate */
1999 s8 satur_pwr; /* saturation power for each chnl group */
2000 group = &eeprom->groups[i];
2001
2002 /* sanity check on factory saturation power value */
2003 if (group->saturation_power < 40) {
2004 IL_WARN("Error: saturation power is %d, "
2005 "less than minimum expected 40\n",
2006 group->saturation_power);
2007 return;
2008 }
2009
2010 /*
2011 * Derive requested power levels for each rate, based on
2012 * hardware capabilities (saturation power for band).
2013 * Basic value is 3dB down from saturation, with further
2014 * power reductions for highest 3 data rates. These
2015 * backoffs provide headroom for high rate modulation
2016 * power peaks, without too much distortion (clipping).
2017 */
2018 /* we'll fill in this array with h/w max power levels */
2019 clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
2020
2021 /* divide factory saturation power by 2 to find -3dB level */
2022 satur_pwr = (s8) (group->saturation_power >> 1);
2023
2024 /* fill in channel group's nominal powers for each rate */
2025 for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
2026 rate_idx++, clip_pwrs++) {
2027 switch (rate_idx) {
2028 case RATE_36M_IDX_TBL:
2029 if (i == 0) /* B/G */
2030 *clip_pwrs = satur_pwr;
2031 else /* A */
2032 *clip_pwrs = satur_pwr - 5;
2033 break;
2034 case RATE_48M_IDX_TBL:
2035 if (i == 0)
2036 *clip_pwrs = satur_pwr - 7;
2037 else
2038 *clip_pwrs = satur_pwr - 10;
2039 break;
2040 case RATE_54M_IDX_TBL:
2041 if (i == 0)
2042 *clip_pwrs = satur_pwr - 9;
2043 else
2044 *clip_pwrs = satur_pwr - 12;
2045 break;
2046 default:
2047 *clip_pwrs = satur_pwr;
2048 break;
2049 }
2050 }
2051 }
2052}
2053
2054/**
2055 * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
2056 *
2057 * Second pass (during init) to set up il->channel_info
2058 *
2059 * Set up Tx-power settings in our channel info database for each VALID
2060 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
2061 * and current temperature.
2062 *
2063 * Since this is based on current temperature (at init time), these values may
2064 * not be valid for very long, but it gives us a starting/default point,
2065 * and allows us to perform active (i.e. using Tx) scans.
2066 *
2067 * This does *not* write values to NIC, just sets up our internal table.
2068 */
2069int
2070il3945_txpower_set_from_eeprom(struct il_priv *il)
2071{
2072 struct il_channel_info *ch_info = NULL;
2073 struct il3945_channel_power_info *pwr_info;
2074 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
2075 int delta_idx;
2076 u8 rate_idx;
2077 u8 scan_tbl_idx;
2078 const s8 *clip_pwrs; /* array of power levels for each rate */
2079 u8 gain, dsp_atten;
2080 s8 power;
2081 u8 pwr_idx, base_pwr_idx, a_band;
2082 u8 i;
2083 int temperature;
2084
2085 /* save temperature reference,
2086 * so we can determine next time to calibrate */
2087 temperature = il3945_hw_reg_txpower_get_temperature(il);
2088 il->last_temperature = temperature;
2089
2090 il3945_hw_reg_init_channel_groups(il);
2091
2092 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2093 for (i = 0, ch_info = il->channel_info; i < il->channel_count;
2094 i++, ch_info++) {
2095 a_band = il_is_channel_a_band(ch_info);
2096 if (!il_is_channel_valid(ch_info))
2097 continue;
2098
2099 /* find this channel's channel group (*not* "band") idx */
2100 ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
2101
2102 /* Get this chnlgrp's rate->max/clip-powers table */
2103 clip_pwrs =
2104 il->_3945.clip_groups[ch_info->group_idx].clip_powers;
2105
2106 /* calculate power idx *adjustment* value according to
2107 * diff between current temperature and factory temperature */
2108 delta_idx =
2109 il3945_hw_reg_adjust_power_by_temp(temperature,
2110 eeprom->groups[ch_info->
2111 group_idx].
2112 temperature);
2113
2114 D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
2115 delta_idx, temperature + IL_TEMP_CONVERT);
2116
2117 /* set tx power value for all OFDM rates */
2118 for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
2119 s32 uninitialized_var(power_idx);
2120 int rc;
2121
2122 /* use channel group's clip-power table,
2123 * but don't exceed channel's max power */
2124 s8 pwr = min(ch_info->max_power_avg,
2125 clip_pwrs[rate_idx]);
2126
2127 pwr_info = &ch_info->power_info[rate_idx];
2128
2129 /* get base (i.e. at factory-measured temperature)
2130 * power table idx for this rate's power */
2131 rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
2132 ch_info->
2133 group_idx,
2134 &power_idx);
2135 if (rc) {
2136 IL_ERR("Invalid power idx\n");
2137 return rc;
2138 }
2139 pwr_info->base_power_idx = (u8) power_idx;
2140
2141 /* temperature compensate */
2142 power_idx += delta_idx;
2143
2144 /* stay within range of gain table */
2145 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
2146
2147 /* fill 1 OFDM rate's il3945_channel_power_info struct */
2148 pwr_info->requested_power = pwr;
2149 pwr_info->power_table_idx = (u8) power_idx;
2150 pwr_info->tpc.tx_gain =
2151 power_gain_table[a_band][power_idx].tx_gain;
2152 pwr_info->tpc.dsp_atten =
2153 power_gain_table[a_band][power_idx].dsp_atten;
2154 }
2155
2156 /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
2157 pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
2158 power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
2159 pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
2160 base_pwr_idx =
2161 pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
2162
2163 /* stay within table range */
2164 pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
2165 gain = power_gain_table[a_band][pwr_idx].tx_gain;
2166 dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
2167
2168 /* fill each CCK rate's il3945_channel_power_info structure
2169 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
2170 * NOTE: CCK rates start at end of OFDM rates! */
2171 for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
2172 pwr_info =
2173 &ch_info->power_info[rate_idx + IL_OFDM_RATES];
2174 pwr_info->requested_power = power;
2175 pwr_info->power_table_idx = pwr_idx;
2176 pwr_info->base_power_idx = base_pwr_idx;
2177 pwr_info->tpc.tx_gain = gain;
2178 pwr_info->tpc.dsp_atten = dsp_atten;
2179 }
2180
2181 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
2182 for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
2183 scan_tbl_idx++) {
2184 s32 actual_idx =
2185 (scan_tbl_idx ==
2186 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
2187 il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
2188 actual_idx, clip_pwrs,
2189 ch_info, a_band);
2190 }
2191 }
2192
2193 return 0;
2194}
2195
2196int
2197il3945_hw_rxq_stop(struct il_priv *il)
2198{
2199 int ret;
2200
2201 _il_wr(il, FH39_RCSR_CONFIG(0), 0);
2202 ret = _il_poll_bit(il, FH39_RSSR_STATUS,
2203 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
2204 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
2205 1000);
2206 if (ret < 0)
2207 IL_ERR("Can't stop Rx DMA.\n");
2208
2209 return 0;
2210}
2211
2212int
2213il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
2214{
2215 int txq_id = txq->q.id;
2216
2217 struct il3945_shared *shared_data = il->_3945.shared_virt;
2218
2219 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
2220
2221 il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
2222 il_wr(il, FH39_CBCC_BASE(txq_id), 0);
2223
2224 il_wr(il, FH39_TCSR_CONFIG(txq_id),
2225 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2226 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2227 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
2228 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
2229 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
2230
2231 /* fake read to flush all prev. writes */
2232 _il_rd(il, FH39_TSSR_CBB_BASE);
2233
2234 return 0;
2235}
2236
2237/*
2238 * HCMD utils
2239 */
2240static u16
2241il3945_get_hcmd_size(u8 cmd_id, u16 len)
2242{
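	/* C_RXON and C_POWER_TBL use 3945-specific command layouts, so
	 * report their actual sizes here; everything else keeps the
	 * caller-supplied length. */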
2243 switch (cmd_id) {
2244 case C_RXON:
2245 return sizeof(struct il3945_rxon_cmd);
2246 case C_POWER_TBL:
2247 return sizeof(struct il3945_powertable_cmd);
2248 default:
2249 return len;
2250 }
2251}
2252
2253static u16
2254il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
2255{
2256 struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
2257 addsta->mode = cmd->mode;
2258 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2259 memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
2260 addsta->station_flags = cmd->station_flags;
2261 addsta->station_flags_msk = cmd->station_flags_msk;
2262 addsta->tid_disable_tx = cpu_to_le16(0);
2263 addsta->rate_n_flags = cmd->rate_n_flags;
2264 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2265 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2266 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2267
2268 return (u16) sizeof(struct il3945_addsta_cmd);
2269}
2270
2271static int
2272il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
2273{
2274 int ret;
2275 u8 sta_id;
2276 unsigned long flags;
2277
2278 if (sta_id_r)
2279 *sta_id_r = IL_INVALID_STATION;
2280
2281 ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
2282 if (ret) {
2283 IL_ERR("Unable to add station %pM\n", addr);
2284 return ret;
2285 }
2286
2287 if (sta_id_r)
2288 *sta_id_r = sta_id;
2289
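	/* Flag the new entry as a driver-added (local) station in the
	 * station table. */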
2290 spin_lock_irqsave(&il->sta_lock, flags);
2291 il->stations[sta_id].used |= IL_STA_LOCAL;
2292 spin_unlock_irqrestore(&il->sta_lock, flags);
2293
2294 return 0;
2295}
2296
2297static int
2298il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
2299 bool add)
2300{
2301 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
2302 int ret;
2303
2304 if (add) {
2305 ret =
2306 il3945_add_bssid_station(il, vif->bss_conf.bssid,
2307 &vif_priv->ibss_bssid_sta_id);
2308 if (ret)
2309 return ret;
2310
2311 il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
2312 (il->band ==
2313 IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
2314 RATE_1M_PLCP);
2315 il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
2316
2317 return 0;
2318 }
2319
2320 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
2321 vif->bss_conf.bssid);
2322}
2323
2324/**
2325 * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
2326 */
2327int
2328il3945_init_hw_rate_table(struct il_priv *il)
2329{
2330 int rc, i, idx, prev_idx;
2331 struct il3945_rate_scaling_cmd rate_cmd = {
2332 .reserved = {0, 0, 0},
2333 };
2334 struct il3945_rate_scaling_info *table = rate_cmd.table;
2335
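	/* Fill each table entry with the rate's PLCP value, the retry count,
	 * and the next lower rate to fall back to; the band-specific fixups
	 * below then adjust the fallback chain. */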
2336 for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
2337 idx = il3945_rates[i].table_rs_idx;
2338
2339 table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp);
2340 table[idx].try_cnt = il->retry_rate;
2341 prev_idx = il3945_get_prev_ieee_rate(i);
2342 table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
2343 }
2344
2345 switch (il->band) {
2346 case IEEE80211_BAND_5GHZ:
2347 D_RATE("Select A mode rate scale\n");
2348 /* If one of the following CCK rates is used,
2349 * have it fall back to the 6M OFDM rate */
2350 for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
2351 table[i].next_rate_idx =
2352 il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
2353
2354 /* Don't fall back to CCK rates */
2355 table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
2356
2357 /* Don't drop out of OFDM rates */
2358 table[RATE_6M_IDX_TBL].next_rate_idx =
2359 il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
2360 break;
2361
2362 case IEEE80211_BAND_2GHZ:
2363 D_RATE("Select B/G mode rate scale\n");
2364		/* If the peer supports no OFDM rates, have each OFDM rate
2365		 * fall back to the 1M CCK rate */
2366
2367 if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
2368 il_is_associated(il)) {
2369
2370 idx = IL_FIRST_CCK_RATE;
2371 for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
2372 table[i].next_rate_idx =
2373 il3945_rates[idx].table_rs_idx;
2374
2375 idx = RATE_11M_IDX_TBL;
2376 /* CCK shouldn't fall back to OFDM... */
2377 table[idx].next_rate_idx = RATE_5M_IDX_TBL;
2378 }
2379 break;
2380
2381 default:
2382 WARN_ON(1);
2383 break;
2384 }
2385
2386 /* Update the rate scaling for control frame Tx */
2387 rate_cmd.table_id = 0;
2388 rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
2389 if (rc)
2390 return rc;
2391
2392 /* Update the rate scaling for data frame Tx */
2393 rate_cmd.table_id = 1;
2394 return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
2395}
2396
2397/* Called when initializing driver */
2398int
2399il3945_hw_set_hw_params(struct il_priv *il)
2400{
2401 memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
2402
2403 il->_3945.shared_virt =
2404 dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
2405 &il->_3945.shared_phys, GFP_KERNEL);
2406 if (!il->_3945.shared_virt)
2407 return -ENOMEM;
2408
2409 il->hw_params.bcast_id = IL3945_BROADCAST_ID;
2410
2411 /* Assign number of Usable TX queues */
2412 il->hw_params.max_txq_num = il->cfg->num_of_queues;
2413
2414 il->hw_params.tfd_size = sizeof(struct il3945_tfd);
2415 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
2416 il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2417 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2418 il->hw_params.max_stations = IL3945_STATION_COUNT;
2419
2420 il->sta_key_max_num = STA_KEY_MAX_NUM;
2421
2422 il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2423 il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
2424 il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
2425
2426 return 0;
2427}
2428
2429unsigned int
2430il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
2431 u8 rate)
2432{
2433 struct il3945_tx_beacon_cmd *tx_beacon_cmd;
2434 unsigned int frame_size;
2435
2436 tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
2437 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2438
2439 tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
2440 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2441
2442 frame_size =
2443 il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
2444 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2445
2446 BUG_ON(frame_size > MAX_MPDU_SIZE);
2447 tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
2448
2449 tx_beacon_cmd->tx.rate = rate;
2450 tx_beacon_cmd->tx.tx_flags =
2451 (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
2452
2453 /* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
2454 tx_beacon_cmd->tx.supp_rates[0] =
2455 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
2456
2457 tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
2458
2459 return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
2460}
2461
2462void
2463il3945_hw_handler_setup(struct il_priv *il)
2464{
2465 il->handlers[C_TX] = il3945_hdl_tx;
2466 il->handlers[N_3945_RX] = il3945_hdl_rx;
2467}
2468
2469void
2470il3945_hw_setup_deferred_work(struct il_priv *il)
2471{
2472 INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
2473 il3945_bg_reg_txpower_periodic);
2474}
2475
2476void
2477il3945_hw_cancel_deferred_work(struct il_priv *il)
2478{
2479 cancel_delayed_work(&il->_3945.thermal_periodic);
2480}
2481
2482/* check contents of special bootstrap uCode SRAM */
2483static int
2484il3945_verify_bsm(struct il_priv *il)
2485{
2486 __le32 *image = il->ucode_boot.v_addr;
2487 u32 len = il->ucode_boot.len;
2488 u32 reg;
2489 u32 val;
2490
2491 D_INFO("Begin verify bsm\n");
2492
2493 /* verify BSM SRAM contents */
2494 val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
2495 for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
2496 reg += sizeof(u32), image++) {
2497 val = il_rd_prph(il, reg);
2498 if (val != le32_to_cpu(*image)) {
2499 IL_ERR("BSM uCode verification failed at "
2500 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
2501 BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
2502 len, val, le32_to_cpu(*image));
2503 return -EIO;
2504 }
2505 }
2506
2507 D_INFO("BSM bootstrap uCode image OK\n");
2508
2509 return 0;
2510}
2511
2512/******************************************************************************
2513 *
2514 * EEPROM related functions
2515 *
2516 ******************************************************************************/
2517
2518/*
2519 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
2520 * embedded controller) as EEPROM reader; each read is a series of pulses
2521 * to/from the EEPROM chip, not a single event, so even reads could conflict
2522 * if they weren't arbitrated by some ownership mechanism. Here, the driver
2523 * simply claims ownership, which should be safe when this function is called
2524 * (i.e. before loading uCode!).
2525 */
2526static int
2527il3945_eeprom_acquire_semaphore(struct il_priv *il)
2528{
2529 _il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
2530 return 0;
2531}
2532
2533static void
2534il3945_eeprom_release_semaphore(struct il_priv *il)
2535{
2536 return;
2537}
2538
2539 /**
2540 * il3945_load_bsm - Load bootstrap instructions
2541 *
2542 * BSM operation:
2543 *
2544 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
2545 * in special SRAM that does not power down during RFKILL. When powering back
2546 * up after power-saving sleeps (or during initial uCode load), the BSM loads
2547 * the bootstrap program into the on-board processor, and starts it.
2548 *
2549 * The bootstrap program loads (via DMA) instructions and data for a new
2550 * program from host DRAM locations indicated by the host driver in the
2551 * BSM_DRAM_* registers. Once the new program is loaded, it starts
2552 * automatically.
2553 *
2554 * When initializing the NIC, the host driver points the BSM to the
2555 * "initialize" uCode image. This uCode sets up some internal data, then
2556 * notifies host via "initialize alive" that it is complete.
2557 *
2558 * The host then replaces the BSM_DRAM_* pointer values to point to the
2559 * normal runtime uCode instructions and a backup uCode data cache buffer
2560 * (filled initially with starting data values for the on-board processor),
2561 * then triggers the "initialize" uCode to load and launch the runtime uCode,
2562 * which begins normal operation.
2563 *
2564 * When doing a power-save shutdown, runtime uCode saves data SRAM into
2565 * the backup data cache in DRAM before SRAM is powered down.
2566 *
2567 * When powering back up, the BSM loads the bootstrap program. This reloads
2568 * the runtime uCode instructions and the backup data cache into SRAM,
2569 * and re-launches the runtime uCode from where it left off.
2570 */
2571static int
2572il3945_load_bsm(struct il_priv *il)
2573{
2574 __le32 *image = il->ucode_boot.v_addr;
2575 u32 len = il->ucode_boot.len;
2576 dma_addr_t pinst;
2577 dma_addr_t pdata;
2578 u32 inst_len;
2579 u32 data_len;
2580 int rc;
2581 int i;
2582 u32 done;
2583 u32 reg_offset;
2584
2585 D_INFO("Begin load bsm\n");
2586
2587 /* make sure bootstrap program is no larger than BSM's SRAM size */
2588 if (len > IL39_MAX_BSM_SIZE)
2589 return -EINVAL;
2590
2591 /* Tell bootstrap uCode where to find the "Initialize" uCode
2592 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
2593 * NOTE: il3945_initialize_alive_start() will replace these values,
2594 * after the "initialize" uCode has run, to point to
2595 * runtime/protocol instructions and backup data cache. */
2596 pinst = il->ucode_init.p_addr;
2597 pdata = il->ucode_init_data.p_addr;
2598 inst_len = il->ucode_init.len;
2599 data_len = il->ucode_init_data.len;
2600
2601 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
2602 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
2603 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2604 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2605
2606 /* Fill BSM memory with bootstrap instructions */
2607 for (reg_offset = BSM_SRAM_LOWER_BOUND;
2608 reg_offset < BSM_SRAM_LOWER_BOUND + len;
2609 reg_offset += sizeof(u32), image++)
2610 _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
2611
2612 rc = il3945_verify_bsm(il);
2613 if (rc)
2614 return rc;
2615
2616 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2617 il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
2618 il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
2619 il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2620
2621 /* Load bootstrap code into instruction SRAM now,
2622 * to prepare to load "initialize" uCode */
2623 il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
2624
2625 /* Wait for load of bootstrap uCode to finish */
2626 for (i = 0; i < 100; i++) {
2627 done = il_rd_prph(il, BSM_WR_CTRL_REG);
2628 if (!(done & BSM_WR_CTRL_REG_BIT_START))
2629 break;
2630 udelay(10);
2631 }
2632 if (i < 100)
2633 D_INFO("BSM write complete, poll %d iterations\n", i);
2634 else {
2635 IL_ERR("BSM write did not complete!\n");
2636 return -EIO;
2637 }
2638
2639 /* Enable future boot loads whenever power management unit triggers it
2640 * (e.g. when powering back up after power-save shutdown) */
2641 il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
2642
2643 return 0;
2644}
2645
2646const struct il_ops il3945_ops = {
2647 .txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
2648 .txq_free_tfd = il3945_hw_txq_free_tfd,
2649 .txq_init = il3945_hw_tx_queue_init,
2650 .load_ucode = il3945_load_bsm,
2651 .dump_nic_error_log = il3945_dump_nic_error_log,
2652 .apm_init = il3945_apm_init,
2653 .send_tx_power = il3945_send_tx_power,
2654 .is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
2655 .eeprom_acquire_semaphore = il3945_eeprom_acquire_semaphore,
2656 .eeprom_release_semaphore = il3945_eeprom_release_semaphore,
2657
2658 .rxon_assoc = il3945_send_rxon_assoc,
2659 .commit_rxon = il3945_commit_rxon,
2660
2661 .get_hcmd_size = il3945_get_hcmd_size,
2662 .build_addsta_hcmd = il3945_build_addsta_hcmd,
2663 .request_scan = il3945_request_scan,
2664 .post_scan = il3945_post_scan,
2665
2666 .post_associate = il3945_post_associate,
2667 .config_ap = il3945_config_ap,
2668 .manage_ibss_station = il3945_manage_ibss_station,
2669
2670 .send_led_cmd = il3945_send_led_cmd,
2671};
2672
2673static struct il_cfg il3945_bg_cfg = {
2674 .name = "3945BG",
2675 .fw_name_pre = IL3945_FW_PRE,
2676 .ucode_api_max = IL3945_UCODE_API_MAX,
2677 .ucode_api_min = IL3945_UCODE_API_MIN,
2678 .sku = IL_SKU_G,
2679 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2680 .mod_params = &il3945_mod_params,
2681 .led_mode = IL_LED_BLINK,
2682
2683 .eeprom_size = IL3945_EEPROM_IMG_SIZE,
2684 .num_of_queues = IL39_NUM_QUEUES,
2685 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2686 .set_l0s = false,
2687 .use_bsm = true,
2688 .led_compensation = 64,
2689 .wd_timeout = IL_DEF_WD_TIMEOUT,
2690
2691 .regulatory_bands = {
2692 EEPROM_REGULATORY_BAND_1_CHANNELS,
2693 EEPROM_REGULATORY_BAND_2_CHANNELS,
2694 EEPROM_REGULATORY_BAND_3_CHANNELS,
2695 EEPROM_REGULATORY_BAND_4_CHANNELS,
2696 EEPROM_REGULATORY_BAND_5_CHANNELS,
2697 EEPROM_REGULATORY_BAND_NO_HT40,
2698 EEPROM_REGULATORY_BAND_NO_HT40,
2699 },
2700};
2701
2702static struct il_cfg il3945_abg_cfg = {
2703 .name = "3945ABG",
2704 .fw_name_pre = IL3945_FW_PRE,
2705 .ucode_api_max = IL3945_UCODE_API_MAX,
2706 .ucode_api_min = IL3945_UCODE_API_MIN,
2707 .sku = IL_SKU_A | IL_SKU_G,
2708 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2709 .mod_params = &il3945_mod_params,
2710 .led_mode = IL_LED_BLINK,
2711
2712 .eeprom_size = IL3945_EEPROM_IMG_SIZE,
2713 .num_of_queues = IL39_NUM_QUEUES,
2714 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2715 .set_l0s = false,
2716 .use_bsm = true,
2717 .led_compensation = 64,
2718 .wd_timeout = IL_DEF_WD_TIMEOUT,
2719
2720 .regulatory_bands = {
2721 EEPROM_REGULATORY_BAND_1_CHANNELS,
2722 EEPROM_REGULATORY_BAND_2_CHANNELS,
2723 EEPROM_REGULATORY_BAND_3_CHANNELS,
2724 EEPROM_REGULATORY_BAND_4_CHANNELS,
2725 EEPROM_REGULATORY_BAND_5_CHANNELS,
2726 EEPROM_REGULATORY_BAND_NO_HT40,
2727 EEPROM_REGULATORY_BAND_NO_HT40,
2728 },
2729};
2730
2731const struct pci_device_id il3945_hw_card_ids[] = {
2732 {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
2733 {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
2734 {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
2735 {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
2736 {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
2737 {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
2738 {0}
2739};
2740
2741MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.h b/drivers/net/wireless/intel/iwlegacy/3945.h
new file mode 100644
index 000000000000..00030d43a194
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945.h
@@ -0,0 +1,593 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __il_3945_h__
28#define __il_3945_h__
29
30#include <linux/pci.h> /* for struct pci_device_id */
31#include <linux/kernel.h>
32#include <net/ieee80211_radiotap.h>
33
34/* Hardware specific file defines the PCI IDs table for that hardware module */
35extern const struct pci_device_id il3945_hw_card_ids[];
36
37#include "common.h"
38
39extern const struct il_ops il3945_ops;
40
41/* Highest firmware API version supported */
42#define IL3945_UCODE_API_MAX 2
43
44/* Lowest firmware API version supported */
45#define IL3945_UCODE_API_MIN 1
46
47#define IL3945_FW_PRE "iwlwifi-3945-"
48#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
49#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
50
51/* Default noise level to report when noise measurement is not available.
52 * This may be because we're:
53 * 1) Not associated (4965, no beacon stats being sent to driver)
54 * 2) Scanning (noise measurement does not apply to associated channel)
55 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
56 * Use default noise value of -127 ... this is below the range of measurable
57 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
58 * Also, -127 works better than 0 when averaging frames with/without
59 * noise info (e.g. averaging might be done in app); measured dBm values are
60 * always negative ... using a negative value as the default keeps all
61 * averages within an s8's (used in some apps) range of negative values. */
62#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
63
64/* Module parameters accessible from iwl-*.c */
65extern struct il_mod_params il3945_mod_params;
66
67struct il3945_rate_scale_data {
68 u64 data;
69 s32 success_counter;
70 s32 success_ratio;
71 s32 counter;
72 s32 average_tpt;
73 unsigned long stamp;
74};
75
76struct il3945_rs_sta {
77 spinlock_t lock;
78 struct il_priv *il;
79 s32 *expected_tpt;
80 unsigned long last_partial_flush;
81 unsigned long last_flush;
82 u32 flush_time;
83 u32 last_tx_packets;
84 u32 tx_packets;
85 u8 tgg;
86 u8 flush_pending;
87 u8 start_rate;
88 struct timer_list rate_scale_flush;
89 struct il3945_rate_scale_data win[RATE_COUNT_3945];
90#ifdef CONFIG_MAC80211_DEBUGFS
91 struct dentry *rs_sta_dbgfs_stats_table_file;
92#endif
93
94 /* used to be in sta_info */
95 int last_txrate_idx;
96};
97
98/*
99 * The common struct MUST be first because it is shared between
100 * 3945 and 4965!
101 */
102struct il3945_sta_priv {
103 struct il_station_priv_common common;
104 struct il3945_rs_sta rs_sta;
105};
106
107enum il3945_antenna {
108 IL_ANTENNA_DIVERSITY,
109 IL_ANTENNA_MAIN,
110 IL_ANTENNA_AUX
111};
112
113/*
114 * RTS threshold here is total size [2347] minus 4 FCS bytes
115 * Per spec:
116 * a value of 0 means RTS on all data/management packets
117 * a value > max MSDU size means no RTS
118 * else RTS for data/management frames where MPDU is larger
119 * than RTS value.
120 */
121#define DEFAULT_RTS_THRESHOLD 2347U
122#define MIN_RTS_THRESHOLD 0U
123#define MAX_RTS_THRESHOLD 2347U
124#define MAX_MSDU_SIZE 2304U
125#define MAX_MPDU_SIZE 2346U
126#define DEFAULT_BEACON_INTERVAL 100U
127#define DEFAULT_SHORT_RETRY_LIMIT 7U
128#define DEFAULT_LONG_RETRY_LIMIT 4U
129
130#define IL_TX_FIFO_AC0 0
131#define IL_TX_FIFO_AC1 1
132#define IL_TX_FIFO_AC2 2
133#define IL_TX_FIFO_AC3 3
134#define IL_TX_FIFO_HCCA_1 5
135#define IL_TX_FIFO_HCCA_2 6
136#define IL_TX_FIFO_NONE 7
137
138#define IEEE80211_DATA_LEN 2304
139#define IEEE80211_4ADDR_LEN 30
140#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
141#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
142
143struct il3945_frame {
144 union {
145 struct ieee80211_hdr frame;
146 struct il3945_tx_beacon_cmd beacon;
147 u8 raw[IEEE80211_FRAME_LEN];
148 u8 cmd[360];
149 } u;
150 struct list_head list;
151};
152
153#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
154#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
155#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
156
157#define IL_SUPPORTED_RATES_IE_LEN 8
158
159#define SCAN_INTERVAL 100
160
161#define MAX_TID_COUNT 9
162
163#define IL_INVALID_RATE 0xFF
164#define IL_INVALID_VALUE -1
165
166#define STA_PS_STATUS_WAKE 0
167#define STA_PS_STATUS_SLEEP 1
168
169struct il3945_ibss_seq {
170 u8 mac[ETH_ALEN];
171 u16 seq_num;
172 u16 frag_num;
173 unsigned long packet_time;
174 struct list_head list;
175};
176
177#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
178 x->u.rx_frame.stats.payload + \
179 x->u.rx_frame.stats.phy_count))
180#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
181 IL_RX_HDR(x)->payload + \
182 le16_to_cpu(IL_RX_HDR(x)->len)))
183#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
184#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
185
186/******************************************************************************
187 *
188 * Functions implemented in iwl3945-base.c which are forward declared here
189 * for use by iwl-*.c
190 *
191 *****************************************************************************/
192int il3945_calc_db_from_ratio(int sig_ratio);
193void il3945_rx_replenish(void *data);
194void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
195unsigned int il3945_fill_beacon_frame(struct il_priv *il,
196 struct ieee80211_hdr *hdr, int left);
197int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
198 bool display);
199void il3945_dump_nic_error_log(struct il_priv *il);
200
201/******************************************************************************
202 *
203 * Functions implemented in iwl-[34]*.c which are forward declared here
204 * for use by iwl3945-base.c
205 *
206 * NOTE: The implementation of these functions are hardware specific
207 * which is why they are in the hardware specific files (vs. iwl-base.c)
208 *
209 * Naming convention --
210 * il3945_       <-- It's part of iwlwifi (formerly the iwl3945_ prefix)
211 * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
212 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
213 * il3945_bg_ <-- Called from work queue context
214 * il3945_mac_ <-- mac80211 callback
215 *
216 ****************************************************************************/
217void il3945_hw_handler_setup(struct il_priv *il);
218void il3945_hw_setup_deferred_work(struct il_priv *il);
219void il3945_hw_cancel_deferred_work(struct il_priv *il);
220int il3945_hw_rxq_stop(struct il_priv *il);
221int il3945_hw_set_hw_params(struct il_priv *il);
222int il3945_hw_nic_init(struct il_priv *il);
223int il3945_hw_nic_stop_master(struct il_priv *il);
224void il3945_hw_txq_ctx_free(struct il_priv *il);
225void il3945_hw_txq_ctx_stop(struct il_priv *il);
226int il3945_hw_nic_reset(struct il_priv *il);
227int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
228 dma_addr_t addr, u16 len, u8 reset, u8 pad);
229void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
230int il3945_hw_get_temperature(struct il_priv *il);
231int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
232unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
233 struct il3945_frame *frame, u8 rate);
234void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
235 struct ieee80211_tx_info *info,
236 struct ieee80211_hdr *hdr, int sta_id);
237int il3945_hw_reg_send_txpower(struct il_priv *il);
238int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
239void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
240void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
241void il3945_disable_events(struct il_priv *il);
242int il4965_get_temperature(const struct il_priv *il);
243void il3945_post_associate(struct il_priv *il);
244void il3945_config_ap(struct il_priv *il);
245
246int il3945_commit_rxon(struct il_priv *il);
247
248/**
249 * il3945_hw_find_station - Find station id for a given BSSID
250 * @bssid: MAC address of station ID to find
251 *
252 * NOTE: This should not be hardware specific but the code has
253 * not yet been merged into a single common layer for managing the
254 * station tables.
255 */
256u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
257
258__le32 il3945_get_antenna_flags(const struct il_priv *il);
259int il3945_init_hw_rate_table(struct il_priv *il);
260void il3945_reg_txpower_periodic(struct il_priv *il);
261int il3945_txpower_set_from_eeprom(struct il_priv *il);
262
263int il3945_rs_next_rate(struct il_priv *il, int rate);
264
265/* scanning */
266int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
267void il3945_post_scan(struct il_priv *il);
268
269/* rates */
270extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
271
272/* RSSI to dBm */
273#define IL39_RSSI_OFFSET 95
274
275/*
276 * EEPROM related constants, enums, and structures.
277 */
278#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
279
280/*
281 * Mapping of a Tx power level, at factory calibration temperature,
282 * to a radio/DSP gain table idx.
283 * One for each of 5 "sample" power levels in each band.
284 * v_det is measured at the factory, using the 3945's built-in power amplifier
285 * (PA) output voltage detector. This same detector is used during Tx of
286 * long packets in normal operation to provide feedback as to proper output
287 * level.
288 * Data copied from EEPROM.
289 * DO NOT ALTER THIS STRUCTURE!!!
290 */
291struct il3945_eeprom_txpower_sample {
292 u8 gain_idx; /* idx into power (gain) setup table ... */
293 s8 power; /* ... for this pwr level for this chnl group */
294 u16 v_det; /* PA output voltage */
295} __packed;
296
297/*
298 * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
299 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
300 * Tx power setup code interpolates between the 5 "sample" power levels
301 * to determine the nominal setup for a requested power level.
302 * Data copied from EEPROM.
303 * DO NOT ALTER THIS STRUCTURE!!!
304 */
305struct il3945_eeprom_txpower_group {
306 struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
307 s32 a, b, c, d, e; /* coefficients for voltage->power
308 * formula (signed) */
309 s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
310 * frequency (signed) */
311 s8 saturation_power; /* highest power possible by h/w in this
312 * band */
313 u8 group_channel; /* "representative" channel # in this band */
314 s16 temperature; /* h/w temperature at factory calib this band
315 * (signed) */
316} __packed;
317
318/*
319 * Temperature-based Tx-power compensation data, not band-specific.
320 * These coefficients are used to modify a/b/c/d/e coeffs based on
321 * difference between current temperature and factory calib temperature.
322 * Data copied from EEPROM.
323 */
324struct il3945_eeprom_temperature_corr {
325 u32 Ta;
326 u32 Tb;
327 u32 Tc;
328 u32 Td;
329 u32 Te;
330} __packed;
331
332/*
333 * EEPROM map
334 */
335struct il3945_eeprom {
336 u8 reserved0[16];
337 u16 device_id; /* abs.ofs: 16 */
338 u8 reserved1[2];
339 u16 pmc; /* abs.ofs: 20 */
340 u8 reserved2[20];
341 u8 mac_address[6]; /* abs.ofs: 42 */
342 u8 reserved3[58];
343 u16 board_revision; /* abs.ofs: 106 */
344 u8 reserved4[11];
345 u8 board_pba_number[9]; /* abs.ofs: 119 */
346 u8 reserved5[8];
347 u16 version; /* abs.ofs: 136 */
348 u8 sku_cap; /* abs.ofs: 138 */
349 u8 leds_mode; /* abs.ofs: 139 */
350 u16 oem_mode;
351 u16 wowlan_mode; /* abs.ofs: 142 */
352 u16 leds_time_interval; /* abs.ofs: 144 */
353 u8 leds_off_time; /* abs.ofs: 146 */
354 u8 leds_on_time; /* abs.ofs: 147 */
355 u8 almgor_m_version; /* abs.ofs: 148 */
356 u8 antenna_switch_type; /* abs.ofs: 149 */
357 u8 reserved6[42];
358 u8 sku_id[4]; /* abs.ofs: 192 */
359
360/*
361 * Per-channel regulatory data.
362 *
363 * Each channel that *might* be supported by 3945 has a fixed location
364 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
365 * txpower (MSB).
366 *
367 * Entries immediately below are for 20 MHz channel width.
368 *
369 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
370 */
371 u16 band_1_count; /* abs.ofs: 196 */
372 struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
373
374/*
375 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
376 * 5.0 GHz channels 7, 8, 11, 12, 16
377 * (4915-5080MHz) (none of these is ever supported)
378 */
379 u16 band_2_count; /* abs.ofs: 226 */
380 struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
381
382/*
383 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
384 * (5170-5320MHz)
385 */
386 u16 band_3_count; /* abs.ofs: 254 */
387 struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
388
389/*
390 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
391 * (5500-5700MHz)
392 */
393 u16 band_4_count; /* abs.ofs: 280 */
394 struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
395
396/*
397 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
398 * (5725-5825MHz)
399 */
400 u16 band_5_count; /* abs.ofs: 304 */
401 struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
402
403 u8 reserved9[194];
404
405/*
406 * 3945 Txpower calibration data.
407 */
408#define IL_NUM_TX_CALIB_GROUPS 5
409 struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
410/* abs.ofs: 512 */
411 struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
412 u8 reserved16[172]; /* fill out to full 1024 byte block */
413} __packed;
414
415#define IL3945_EEPROM_IMG_SIZE 1024
416
417/* End of EEPROM */
418
419#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
420#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
421
422/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
423#define IL39_NUM_QUEUES 5
424#define IL39_CMD_QUEUE_NUM 4
425
426#define IL_DEFAULT_TX_RETRY 15
427
428/*********************************************/
429
430#define RFD_SIZE 4
431#define NUM_TFD_CHUNKS 4
432
433#define TFD_CTL_COUNT_SET(n) (n << 24)
434#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
435#define TFD_CTL_PAD_SET(n) (n << 28)
436#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
437
438/* Sizes and addresses for instruction and data memory (SRAM) in
439 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
440#define IL39_RTC_INST_LOWER_BOUND (0x000000)
441#define IL39_RTC_INST_UPPER_BOUND (0x014000)
442
443#define IL39_RTC_DATA_LOWER_BOUND (0x800000)
444#define IL39_RTC_DATA_UPPER_BOUND (0x808000)
445
446#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
447 IL39_RTC_INST_LOWER_BOUND)
448#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
449 IL39_RTC_DATA_LOWER_BOUND)
450
451#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
452#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
453
454/* Size of uCode instruction memory in bootstrap state machine */
455#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
456
457static inline int
458il3945_hw_valid_rtc_data_addr(u32 addr)
459{
460 return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
461 addr < IL39_RTC_DATA_UPPER_BOUND);
462}
463
464/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
465 * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
466struct il3945_shared {
467 __le32 tx_base_ptr[8];
468} __packed;
469
470/************************************/
471/* iwl3945 Flow Handler Definitions */
472/************************************/
473
474/**
475 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
476 * Addresses are offsets from device's PCI hardware base address.
477 */
478#define FH39_MEM_LOWER_BOUND (0x0800)
479#define FH39_MEM_UPPER_BOUND (0x1000)
480
481#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140)
482#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180)
483#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400)
484#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0)
485#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500)
486#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680)
487
488/* TFDB (Transmit Frame Buffer Descriptor) */
489#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \
490 ((_ch) * 2 + (buf)) * 0x28)
491#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch))
492
493/* CBCC channel is [0,2] */
494#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8)
495#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
496#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
497
498/* RCSR channel is [0,2] */
499#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40)
500#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
501#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
502#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
503#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
504
505#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
506
507/* RSSR */
508#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000)
509#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004)
510
511/* TCSR */
512#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20)
513#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
514#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
515#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
516
517/* TSSR */
518#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000)
519#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008)
520#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010)
521
522/* DBM */
523
524#define FH39_SRVC_CHNL (6)
525
526#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
527#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
528
529#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
530
531#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
532
533#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
534
535#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
536
537#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
538
539#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
540
541#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
542#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
543
544#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
545#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
546
547#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
548
549#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
550
551#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
552#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
553
554#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
555
556#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
557
558#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
559#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
560
561#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
562
563#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
564#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
565
566#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
567#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
568
569#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
570#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
571
572#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
573 (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
574 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
575
576#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
577
578struct il3945_tfd_tb {
579 __le32 addr;
580 __le32 len;
581} __packed;
582
583struct il3945_tfd {
584 __le32 control_flags;
585 struct il3945_tfd_tb tbs[4];
586 u8 __pad[28];
587} __packed;
588
589#ifdef CONFIG_IWLEGACY_DEBUGFS
590extern const struct il_debugfs_ops il3945_debugfs_ops;
591#endif
592
593#endif
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-calib.c b/drivers/net/wireless/intel/iwlegacy/4965-calib.c
new file mode 100644
index 000000000000..e78bdefb8952
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965-calib.c
@@ -0,0 +1,934 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/slab.h>
64#include <net/mac80211.h>
65
66#include "common.h"
67#include "4965.h"
68
69/*****************************************************************************
70 * INIT calibrations framework
71 *****************************************************************************/
72
73struct stats_general_data {
74 u32 beacon_silence_rssi_a;
75 u32 beacon_silence_rssi_b;
76 u32 beacon_silence_rssi_c;
77 u32 beacon_energy_a;
78 u32 beacon_energy_b;
79 u32 beacon_energy_c;
80};
81
82/*****************************************************************************
83 * RUNTIME calibrations framework
84 *****************************************************************************/
85
86/* "false alarms" are signals that our DSP tries to lock onto,
87 * but then determines that they are either noise, or transmissions
88 * from a distant wireless network (also "noise", really) that get
89 * "stepped on" by stronger transmissions within our own network.
90 * This algorithm attempts to set a sensitivity level that is high
91 * enough to receive all of our own network traffic, but not so
92 * high that our DSP gets too busy trying to lock onto non-network
93 * activity/noise. */
94static int
95il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
96 struct stats_general_data *rx_info)
97{
98 u32 max_nrg_cck = 0;
99 int i = 0;
100 u8 max_silence_rssi = 0;
101 u32 silence_ref = 0;
102 u8 silence_rssi_a = 0;
103 u8 silence_rssi_b = 0;
104 u8 silence_rssi_c = 0;
105 u32 val;
106
107 /* "false_alarms" values below are cross-multiplications to assess the
108 * numbers of false alarms within the measured period of actual Rx
109 * (Rx is off when we're txing), vs the min/max expected false alarms
110 * (some should be expected if rx is sensitive enough) in a
111 * hypothetical listening period of 200 time units (TU), 204.8 msec:
112 *
113 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
114 *
115 * */
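	/* 200 TU * 1024 usec/TU = the 204.8 msec hypothetical listening
	 * period described above; scaling norm_fa by it lets the count be
	 * compared against MIN/MAX_FA_CCK * rx_enable_time. */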
116 u32 false_alarms = norm_fa * 200 * 1024;
117 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
118 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
119 struct il_sensitivity_data *data = NULL;
120 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
121
122 data = &(il->sensitivity_data);
123
124 data->nrg_auto_corr_silence_diff = 0;
125
126 /* Find max silence rssi among all 3 receivers.
127 * This is background noise, which may include transmissions from other
128 * networks, measured during silence before our network's beacon */
129 silence_rssi_a =
130 (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
131 silence_rssi_b =
132 (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
133 silence_rssi_c =
134 (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
135
136 val = max(silence_rssi_b, silence_rssi_c);
137 max_silence_rssi = max(silence_rssi_a, (u8) val);
138
139 /* Store silence rssi in 20-beacon history table */
140 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
141 data->nrg_silence_idx++;
142 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
143 data->nrg_silence_idx = 0;
144
145 /* Find max silence rssi across 20 beacon history */
146 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
147 val = data->nrg_silence_rssi[i];
148 silence_ref = max(silence_ref, val);
149 }
150 D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
151 silence_rssi_b, silence_rssi_c, silence_ref);
152
153 /* Find max rx energy (min value!) among all 3 receivers,
154 * measured during beacon frame.
155 * Save it in 10-beacon history table. */
156 i = data->nrg_energy_idx;
157 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
158 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
159
160 data->nrg_energy_idx++;
161 if (data->nrg_energy_idx >= 10)
162 data->nrg_energy_idx = 0;
163
164 /* Find min rx energy (max value) across 10 beacon history.
165 * This is the minimum signal level that we want to receive well.
166 * Add backoff (margin so we don't miss slightly lower energy frames).
167 * This establishes an upper bound (min value) for energy threshold. */
168 max_nrg_cck = data->nrg_value[0];
169 for (i = 1; i < 10; i++)
170 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
171 max_nrg_cck += 6;
172
173 D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
174 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
175 rx_info->beacon_energy_c, max_nrg_cck - 6);
176
177 /* Count number of consecutive beacons with fewer-than-desired
178 * false alarms. */
179 if (false_alarms < min_false_alarms)
180 data->num_in_cck_no_fa++;
181 else
182 data->num_in_cck_no_fa = 0;
183 D_CALIB("consecutive bcns with few false alarms = %u\n",
184 data->num_in_cck_no_fa);
185
186 /* If we got too many false alarms this time, reduce sensitivity */
187 if (false_alarms > max_false_alarms &&
188 data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
189 D_CALIB("norm FA %u > max FA %u\n", false_alarms,
190 max_false_alarms);
191 D_CALIB("... reducing sensitivity\n");
192 data->nrg_curr_state = IL_FA_TOO_MANY;
193 /* Store for "fewer than desired" on later beacon */
194 data->nrg_silence_ref = silence_ref;
195
196 /* increase energy threshold (reduce nrg value)
197 * to decrease sensitivity */
198 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
199 /* Else if we got fewer than desired, increase sensitivity */
200 } else if (false_alarms < min_false_alarms) {
201 data->nrg_curr_state = IL_FA_TOO_FEW;
202
203 /* Compare silence level with silence level for most recent
204 * healthy number or too many false alarms */
205 data->nrg_auto_corr_silence_diff =
206 (s32) data->nrg_silence_ref - (s32) silence_ref;
207
208 D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
209 false_alarms, min_false_alarms,
210 data->nrg_auto_corr_silence_diff);
211
212 /* Increase value to increase sensitivity, but only if:
213 * 1a) previous beacon did *not* have *too many* false alarms
214 * 1b) AND there's a significant difference in Rx levels
215 * from a previous beacon with too many, or healthy # FAs
216 * OR 2) We've seen a lot of beacons (100) with too few
217 * false alarms */
218 if (data->nrg_prev_state != IL_FA_TOO_MANY &&
219 (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
220 data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
221
222 D_CALIB("... increasing sensitivity\n");
223 /* Increase nrg value to increase sensitivity */
224 val = data->nrg_th_cck + NRG_STEP_CCK;
225 data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
226 } else {
227 D_CALIB("... but not changing sensitivity\n");
228 }
229
230 /* Else we got a healthy number of false alarms, keep status quo */
231 } else {
232 D_CALIB(" FA in safe zone\n");
233 data->nrg_curr_state = IL_FA_GOOD_RANGE;
234
235 /* Store for use in "fewer than desired" with later beacon */
236 data->nrg_silence_ref = silence_ref;
237
238 /* If previous beacon had too many false alarms,
239 * give it some extra margin by reducing sensitivity again
240 * (but don't go below measured energy of desired Rx) */
241 if (IL_FA_TOO_MANY == data->nrg_prev_state) {
242 D_CALIB("... increasing margin\n");
243 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
244 data->nrg_th_cck -= NRG_MARGIN;
245 else
246 data->nrg_th_cck = max_nrg_cck;
247 }
248 }
249
250 /* Make sure the energy threshold does not go above the measured
251 * energy of the desired Rx signals (reduced by backoff margin),
252 * or else we might start missing Rx frames.
253 * Lower value is higher energy, so we use max()!
254 */
255 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
256 D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
257
258 data->nrg_prev_state = data->nrg_curr_state;
259
260 /* Auto-correlation CCK algorithm */
261 if (false_alarms > min_false_alarms) {
262
263 /* increase auto_corr values to decrease sensitivity
264 * so the DSP won't be disturbed by the noise
265 */
266 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
267 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
268 else {
269 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
270 data->auto_corr_cck =
271 min((u32) ranges->auto_corr_max_cck, val);
272 }
273 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
274 data->auto_corr_cck_mrc =
275 min((u32) ranges->auto_corr_max_cck_mrc, val);
276 } else if (false_alarms < min_false_alarms &&
277 (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
278 data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
279
280 /* Decrease auto_corr values to increase sensitivity */
281 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
282 data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
283 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
284 data->auto_corr_cck_mrc =
285 max((u32) ranges->auto_corr_min_cck_mrc, val);
286 }
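/* Note on the desensitizing branch above (false_alarms > min_false_alarms):
 * auto_corr_cck is first jumped straight past AUTO_CORR_MAX_TH_CCK (to that
 * threshold + 1) if it is still below it, and only on later noisy beacons
 * stepped by AUTO_CORR_STEP_CCK toward ranges->auto_corr_max_cck, while
 * auto_corr_cck_mrc is always stepped. */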
287
288 return 0;
289}
290
291static int
292il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
293{
294 u32 val;
295 u32 false_alarms = norm_fa * 200 * 1024;
296 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
297 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
298 struct il_sensitivity_data *data = NULL;
299 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
300
301 data = &(il->sensitivity_data);
302
303 /* If we got too many false alarms this time, reduce sensitivity */
304 if (false_alarms > max_false_alarms) {
305
306 D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
307 max_false_alarms);
308
309 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
310 data->auto_corr_ofdm =
311 min((u32) ranges->auto_corr_max_ofdm, val);
312
313 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
314 data->auto_corr_ofdm_mrc =
315 min((u32) ranges->auto_corr_max_ofdm_mrc, val);
316
317 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
318 data->auto_corr_ofdm_x1 =
319 min((u32) ranges->auto_corr_max_ofdm_x1, val);
320
321 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
322 data->auto_corr_ofdm_mrc_x1 =
323 min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
324 }
325
326 /* Else if we got fewer than desired, increase sensitivity */
327 else if (false_alarms < min_false_alarms) {
328
329 D_CALIB("norm FA %u < min FA %u\n", false_alarms,
330 min_false_alarms);
331
332 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
333 data->auto_corr_ofdm =
334 max((u32) ranges->auto_corr_min_ofdm, val);
335
336 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
337 data->auto_corr_ofdm_mrc =
338 max((u32) ranges->auto_corr_min_ofdm_mrc, val);
339
340 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
341 data->auto_corr_ofdm_x1 =
342 max((u32) ranges->auto_corr_min_ofdm_x1, val);
343
344 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
345 data->auto_corr_ofdm_mrc_x1 =
346 max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
347 } else {
348 D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
349 min_false_alarms, false_alarms, max_false_alarms);
350 }
351 return 0;
352}
353
354static void
355il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
356 struct il_sensitivity_data *data,
357 __le16 *tbl)
358{
359 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
360 cpu_to_le16((u16) data->auto_corr_ofdm);
361 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
362 cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
363 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
364 cpu_to_le16((u16) data->auto_corr_ofdm_x1);
365 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
366 cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
367
368 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
369 cpu_to_le16((u16) data->auto_corr_cck);
370 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
371 cpu_to_le16((u16) data->auto_corr_cck_mrc);
372
373 tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
374 tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
375
376 tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
377 cpu_to_le16(data->barker_corr_th_min);
378 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
379 cpu_to_le16(data->barker_corr_th_min_mrc);
380 tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
381
382 D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
383 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
384 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
385 data->nrg_th_ofdm);
386
387 D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
388 data->auto_corr_cck_mrc, data->nrg_th_cck);
389}
390
391/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
392static int
393il4965_sensitivity_write(struct il_priv *il)
394{
395 struct il_sensitivity_cmd cmd;
396 struct il_sensitivity_data *data = NULL;
397 struct il_host_cmd cmd_out = {
398 .id = C_SENSITIVITY,
399 .len = sizeof(struct il_sensitivity_cmd),
400 .flags = CMD_ASYNC,
401 .data = &cmd,
402 };
403
404 data = &(il->sensitivity_data);
405
406 memset(&cmd, 0, sizeof(cmd));
407
408 il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
409
410 /* Update uCode's "work" table, and copy it to DSP */
411 cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
412
413 /* Don't send command to uCode if nothing has changed */
414 if (!memcmp
415 (&cmd.table[0], &(il->sensitivity_tbl[0]),
416 sizeof(u16) * HD_TBL_SIZE)) {
417 D_CALIB("No change in C_SENSITIVITY\n");
418 return 0;
419 }
420
421 /* Copy table for comparison next time */
422 memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
423 sizeof(u16) * HD_TBL_SIZE);
424
425 return il_send_cmd(il, &cmd_out);
426}
427
428void
429il4965_init_sensitivity(struct il_priv *il)
430{
431 int ret = 0;
432 int i;
433 struct il_sensitivity_data *data = NULL;
434 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
435
436 if (il->disable_sens_cal)
437 return;
438
439 D_CALIB("Start il4965_init_sensitivity\n");
440
441 /* Clear driver's sensitivity algo data */
442 data = &(il->sensitivity_data);
443
444 if (ranges == NULL)
445 return;
446
447 memset(data, 0, sizeof(struct il_sensitivity_data));
448
449 data->num_in_cck_no_fa = 0;
450 data->nrg_curr_state = IL_FA_TOO_MANY;
451 data->nrg_prev_state = IL_FA_TOO_MANY;
452 data->nrg_silence_ref = 0;
453 data->nrg_silence_idx = 0;
454 data->nrg_energy_idx = 0;
455
456 for (i = 0; i < 10; i++)
457 data->nrg_value[i] = 0;
458
459 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
460 data->nrg_silence_rssi[i] = 0;
461
462 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
463 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
464 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
465 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
466 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
467 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
468 data->nrg_th_cck = ranges->nrg_th_cck;
469 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
470 data->barker_corr_th_min = ranges->barker_corr_th_min;
471 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
472 data->nrg_th_cca = ranges->nrg_th_cca;
473
474 data->last_bad_plcp_cnt_ofdm = 0;
475 data->last_fa_cnt_ofdm = 0;
476 data->last_bad_plcp_cnt_cck = 0;
477 data->last_fa_cnt_cck = 0;
478
479 ret |= il4965_sensitivity_write(il);
480 D_CALIB("<<return 0x%X\n", ret);
481}
482
483void
484il4965_sensitivity_calibration(struct il_priv *il, void *resp)
485{
486 u32 rx_enable_time;
487 u32 fa_cck;
488 u32 fa_ofdm;
489 u32 bad_plcp_cck;
490 u32 bad_plcp_ofdm;
491 u32 norm_fa_ofdm;
492 u32 norm_fa_cck;
493 struct il_sensitivity_data *data = NULL;
494 struct stats_rx_non_phy *rx_info;
495 struct stats_rx_phy *ofdm, *cck;
496 unsigned long flags;
497 struct stats_general_data statis;
498
499 if (il->disable_sens_cal)
500 return;
501
502 data = &(il->sensitivity_data);
503
504 if (!il_is_any_associated(il)) {
505 D_CALIB("<< - not associated\n");
506 return;
507 }
508
509 spin_lock_irqsave(&il->lock, flags);
510
511 rx_info = &(((struct il_notif_stats *)resp)->rx.general);
512 ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
513 cck = &(((struct il_notif_stats *)resp)->rx.cck);
514
515 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
516 D_CALIB("<< invalid data.\n");
517 spin_unlock_irqrestore(&il->lock, flags);
518 return;
519 }
520
521 /* Extract Statistics: */
522 rx_enable_time = le32_to_cpu(rx_info->channel_load);
523 fa_cck = le32_to_cpu(cck->false_alarm_cnt);
524 fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
525 bad_plcp_cck = le32_to_cpu(cck->plcp_err);
526 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
527
528 statis.beacon_silence_rssi_a =
529 le32_to_cpu(rx_info->beacon_silence_rssi_a);
530 statis.beacon_silence_rssi_b =
531 le32_to_cpu(rx_info->beacon_silence_rssi_b);
532 statis.beacon_silence_rssi_c =
533 le32_to_cpu(rx_info->beacon_silence_rssi_c);
534 statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
535 statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
536 statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
537
538 spin_unlock_irqrestore(&il->lock, flags);
539
540 D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
541
542 if (!rx_enable_time) {
543 D_CALIB("<< RX Enable Time == 0!\n");
544 return;
545 }
546
547 /* These stats increase monotonically, and do not reset
548 * at each beacon. Calculate difference from last value, or just
549 * use the new stats value if it has reset or wrapped around. */
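/* Example of the delta handling in the blocks below (illustrative counts):
 * if last_fa_cnt_cck was 120 and the new cumulative fa_cck is 135, this
 * period contributes 135 - 120 = 15 and the saved count becomes 135.  If
 * instead fa_cck came back as 7 (uCode restart), 7 is used as-is for this
 * period and also stored as the new baseline. */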
550 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
551 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
552 else {
553 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
554 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
555 }
556
557 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
558 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
559 else {
560 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
561 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
562 }
563
564 if (data->last_fa_cnt_ofdm > fa_ofdm)
565 data->last_fa_cnt_ofdm = fa_ofdm;
566 else {
567 fa_ofdm -= data->last_fa_cnt_ofdm;
568 data->last_fa_cnt_ofdm += fa_ofdm;
569 }
570
571 if (data->last_fa_cnt_cck > fa_cck)
572 data->last_fa_cnt_cck = fa_cck;
573 else {
574 fa_cck -= data->last_fa_cnt_cck;
575 data->last_fa_cnt_cck += fa_cck;
576 }
577
578 /* Total aborted signal locks */
579 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
580 norm_fa_cck = fa_cck + bad_plcp_cck;
581
582 D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
583 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
584
585 il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
586 il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
587
588 il4965_sensitivity_write(il);
589}
590
591static inline u8
592il4965_find_first_chain(u8 mask)
593{
594 if (mask & ANT_A)
595 return CHAIN_A;
596 if (mask & ANT_B)
597 return CHAIN_B;
598 return CHAIN_C;
599}
600
601/**
602 * Run disconnected antenna algorithm to find out which antennas are
603 * disconnected.
604 */
605static void
606il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
607 struct il_chain_noise_data *data)
608{
609 u32 active_chains = 0;
610 u32 max_average_sig;
611 u16 max_average_sig_antenna_i;
612 u8 num_tx_chains;
613 u8 first_chain;
614 u16 i = 0;
615
616 average_sig[0] =
617 data->chain_signal_a /
618 il->cfg->chain_noise_num_beacons;
619 average_sig[1] =
620 data->chain_signal_b /
621 il->cfg->chain_noise_num_beacons;
622 average_sig[2] =
623 data->chain_signal_c /
624 il->cfg->chain_noise_num_beacons;
625
626 if (average_sig[0] >= average_sig[1]) {
627 max_average_sig = average_sig[0];
628 max_average_sig_antenna_i = 0;
629 active_chains = (1 << max_average_sig_antenna_i);
630 } else {
631 max_average_sig = average_sig[1];
632 max_average_sig_antenna_i = 1;
633 active_chains = (1 << max_average_sig_antenna_i);
634 }
635
636 if (average_sig[2] >= max_average_sig) {
637 max_average_sig = average_sig[2];
638 max_average_sig_antenna_i = 2;
639 active_chains = (1 << max_average_sig_antenna_i);
640 }
641
642 D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
643 average_sig[2]);
644 D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
645 max_average_sig_antenna_i);
646
647 /* Compare signal strengths for all 3 receivers. */
648 for (i = 0; i < NUM_RX_CHAINS; i++) {
649 if (i != max_average_sig_antenna_i) {
650 s32 rssi_delta = (max_average_sig - average_sig[i]);
651
652 /* If signal is very weak, compared with
653 * strongest, mark it as disconnected. */
654 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
655 data->disconn_array[i] = 1;
656 else
657 active_chains |= (1 << i);
658 D_CALIB("i = %d rssiDelta = %d "
659 "disconn_array[i] = %d\n", i, rssi_delta,
660 data->disconn_array[i]);
661 }
662 }
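/* Illustrative pass through the loop above (made-up averages):
 * average_sig = {50, 48, 20} makes chain A the reference.  Chain B differs
 * by 2 and stays connected; chain C differs by 30 which, assuming
 * MAXIMUM_ALLOWED_PATHLOSS is 15, marks disconn_array[2] and leaves only
 * chains A and B in active_chains so far. */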
663
664 /*
665 * The above algorithm sometimes fails when the ucode
666 * reports 0 for all chains. It's not clear why that
667 * happens to start with, but it is then causing trouble
668 * because this can make us enable more chains than the
669 * hardware really has.
670 *
671 * To be safe, simply mask out any chains that we know
672 * are not on the device.
673 */
674 active_chains &= il->hw_params.valid_rx_ant;
675
676 num_tx_chains = 0;
677 for (i = 0; i < NUM_RX_CHAINS; i++) {
678 /* loops on all the bits of
679 * il->hw_params.valid_tx_ant */
680 u8 ant_msk = (1 << i);
681 if (!(il->hw_params.valid_tx_ant & ant_msk))
682 continue;
683
684 num_tx_chains++;
685 if (data->disconn_array[i] == 0)
686 /* there is a Tx antenna connected */
687 break;
688 if (num_tx_chains == il->hw_params.tx_chains_num &&
689 data->disconn_array[i]) {
690 /*
691 * If all chains are disconnected
692 * connect the first valid tx chain
693 */
694 first_chain =
695 il4965_find_first_chain(il->cfg->valid_tx_ant);
696 data->disconn_array[first_chain] = 0;
697 active_chains |= BIT(first_chain);
698 D_CALIB("All Tx chains are disconnected"
699 "- declare %d as connected\n", first_chain);
700 break;
701 }
702 }
703
704 if (active_chains != il->hw_params.valid_rx_ant &&
705 active_chains != il->chain_noise_data.active_chains)
706 D_CALIB("Detected that not all antennas are connected! "
707 "Connected: %#x, valid: %#x.\n", active_chains,
708 il->hw_params.valid_rx_ant);
709
710 /* Save for use within RXON, TX, SCAN commands, etc. */
711 data->active_chains = active_chains;
712 D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
713}
714
715static void
716il4965_gain_computation(struct il_priv *il, u32 * average_noise,
717 u16 min_average_noise_antenna_i, u32 min_average_noise,
718 u8 default_chain)
719{
720 int i, ret;
721 struct il_chain_noise_data *data = &il->chain_noise_data;
722
723 data->delta_gain_code[min_average_noise_antenna_i] = 0;
724
725 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
726 s32 delta_g = 0;
727
728 if (!data->disconn_array[i] &&
729 data->delta_gain_code[i] ==
730 CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
731 delta_g = average_noise[i] - min_average_noise;
732 data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
733 data->delta_gain_code[i] =
734 min(data->delta_gain_code[i],
735 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
736
737 data->delta_gain_code[i] =
738 (data->delta_gain_code[i] | (1 << 2));
739 } else {
740 data->delta_gain_code[i] = 0;
741 }
742 }
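/* Example with made-up averaged noise {90, 93, 92} and chain A quietest:
 * chain B gets delta_g = 3 -> (3 * 10) / 15 = 2, chain C gets delta_g = 2
 * -> 1 (integer division); with bit 2 then set as above (and assuming the
 * CHAIN_NOISE_MAX_DELTA_GAIN_CODE clamp is not hit), delta_gain_code ends
 * up {0, 6, 5}. */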
743 D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
744 data->delta_gain_code[1], data->delta_gain_code[2]);
745
746 /* Differential gain gets sent to uCode only once */
747 if (!data->radio_write) {
748 struct il_calib_diff_gain_cmd cmd;
749 data->radio_write = 1;
750
751 memset(&cmd, 0, sizeof(cmd));
752 cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
753 cmd.diff_gain_a = data->delta_gain_code[0];
754 cmd.diff_gain_b = data->delta_gain_code[1];
755 cmd.diff_gain_c = data->delta_gain_code[2];
756 ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
757 if (ret)
758 D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n");
759
760 /* TODO we might want to recalculate
761 * rx_chain in rxon cmd */
762
763 /* Mark so we run this algo only once! */
764 data->state = IL_CHAIN_NOISE_CALIBRATED;
765 }
766}
767
768/*
769 * Accumulate 16 beacons of signal and noise stats for each of
770 * 3 receivers/antennas/rx-chains, then figure out:
771 * 1) Which antennas are connected.
772 * 2) Differential rx gain settings to balance the 3 receivers.
773 */
774void
775il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
776{
777 struct il_chain_noise_data *data = NULL;
778
779 u32 chain_noise_a;
780 u32 chain_noise_b;
781 u32 chain_noise_c;
782 u32 chain_sig_a;
783 u32 chain_sig_b;
784 u32 chain_sig_c;
785 u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
786 u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
787 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
788 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
789 u16 i = 0;
790 u16 rxon_chnum = INITIALIZATION_VALUE;
791 u16 stat_chnum = INITIALIZATION_VALUE;
792 u8 rxon_band24;
793 u8 stat_band24;
794 unsigned long flags;
795 struct stats_rx_non_phy *rx_info;
796
797 if (il->disable_chain_noise_cal)
798 return;
799
800 data = &(il->chain_noise_data);
801
802 /*
803 * Accumulate just the first "chain_noise_num_beacons" after
804 * the first association, then we're done forever.
805 */
806 if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
807 if (data->state == IL_CHAIN_NOISE_ALIVE)
808 D_CALIB("Wait for noise calib reset\n");
809 return;
810 }
811
812 spin_lock_irqsave(&il->lock, flags);
813
814 rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
815
816 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
817 D_CALIB(" << Interference data unavailable\n");
818 spin_unlock_irqrestore(&il->lock, flags);
819 return;
820 }
821
822 rxon_band24 = !!(il->staging.flags & RXON_FLG_BAND_24G_MSK);
823 rxon_chnum = le16_to_cpu(il->staging.channel);
824
825 stat_band24 =
826 !!(((struct il_notif_stats *)stat_resp)->
827 flag & STATS_REPLY_FLG_BAND_24G_MSK);
828 stat_chnum =
829 le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
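/* The stats 'flag' word carries the band flag tested above and, in its
 * upper 16 bits, the channel number, hence the >> 16. */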
830
831 /* Make sure we accumulate data for just the associated channel
832 * (even if scanning). */
833 if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
834 D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
835 rxon_band24);
836 spin_unlock_irqrestore(&il->lock, flags);
837 return;
838 }
839
840 /*
841 * Accumulate beacon stats values across
842 * "chain_noise_num_beacons"
843 */
844 chain_noise_a =
845 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
846 chain_noise_b =
847 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
848 chain_noise_c =
849 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
850
851 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
852 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
853 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
854
855 spin_unlock_irqrestore(&il->lock, flags);
856
857 data->beacon_count++;
858
859 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
860 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
861 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
862
863 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
864 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
865 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
866
867 D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
868 data->beacon_count);
869 D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
870 chain_sig_c);
871 D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
872 chain_noise_c);
873
874 /* If this is the "chain_noise_num_beacons", determine:
875 * 1) Disconnected antennas (using signal strengths)
876 * 2) Differential gain (using silence noise) to balance receivers */
877 if (data->beacon_count != il->cfg->chain_noise_num_beacons)
878 return;
879
880 /* Analyze signal for disconnected antenna */
881 il4965_find_disconn_antenna(il, average_sig, data);
882
883 /* Analyze noise for rx balance */
884 average_noise[0] =
885 data->chain_noise_a / il->cfg->chain_noise_num_beacons;
886 average_noise[1] =
887 data->chain_noise_b / il->cfg->chain_noise_num_beacons;
888 average_noise[2] =
889 data->chain_noise_c / il->cfg->chain_noise_num_beacons;
890
891 for (i = 0; i < NUM_RX_CHAINS; i++) {
892 if (!data->disconn_array[i] &&
893 average_noise[i] <= min_average_noise) {
894 /* This means that chain i is active and has
895 * lower noise values so far: */
896 min_average_noise = average_noise[i];
897 min_average_noise_antenna_i = i;
898 }
899 }
900
901 D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
902 average_noise[1], average_noise[2]);
903
904 D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
905 min_average_noise_antenna_i);
906
907 il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
908 min_average_noise,
909 il4965_find_first_chain(il->cfg->valid_rx_ant));
910
911 /* Some power changes may have been made during the calibration.
912 * Update and commit the RXON
913 */
914 if (il->ops->update_chain_flags)
915 il->ops->update_chain_flags(il);
916
917 data->state = IL_CHAIN_NOISE_DONE;
918 il_power_update_mode(il, false);
919}
920
921void
922il4965_reset_run_time_calib(struct il_priv *il)
923{
924 int i;
925 memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
926 memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
927 for (i = 0; i < NUM_RX_CHAINS; i++)
928 il->chain_noise_data.delta_gain_code[i] =
929 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
930
931 /* Ask for stats now, the uCode will send notification
932 * periodically after association */
933 il_send_stats_request(il, CMD_ASYNC, true);
934}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-debug.c b/drivers/net/wireless/intel/iwlegacy/4965-debug.c
new file mode 100644
index 000000000000..e0597bfdddb8
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965-debug.c
@@ -0,0 +1,752 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "common.h"
29#include "4965.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
35
36static int
37il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
38{
39 int p = 0;
40 u32 flag;
41
42 flag = le32_to_cpu(il->_4965.stats.flag);
43
44 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
45 if (flag & UCODE_STATS_CLEAR_MSK)
46 p += scnprintf(buf + p, bufsz - p,
47 "\tStatistics have been cleared\n");
48 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
49 (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
50 "5.2 GHz");
51 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
52 (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
53 "disabled");
54
55 return p;
56}
57
58static ssize_t
59il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
60 size_t count, loff_t *ppos)
61{
62 struct il_priv *il = file->private_data;
63 int pos = 0;
64 char *buf;
65 int bufsz =
66 sizeof(struct stats_rx_phy) * 40 +
67 sizeof(struct stats_rx_non_phy) * 40 +
68 sizeof(struct stats_rx_ht_phy) * 40 + 400;
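/* Heuristic sizing: roughly 40 bytes of formatted output are budgeted per
 * byte of each Rx stats struct (each fmt_table row is ~75 characters per
 * u32 counter), plus slack for the flag block and section headers. */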
69 ssize_t ret;
70 struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
71 struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
72 struct stats_rx_non_phy *general, *accum_general;
73 struct stats_rx_non_phy *delta_general, *max_general;
74 struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
75
76 if (!il_is_alive(il))
77 return -EAGAIN;
78
79 buf = kzalloc(bufsz, GFP_KERNEL);
80 if (!buf) {
81 IL_ERR("Can not allocate Buffer\n");
82 return -ENOMEM;
83 }
84
85 /*
86 * the statistic information displayed here is based on
87 * the last stats notification from uCode and
88 * might not reflect the current uCode activity
89 */
90 ofdm = &il->_4965.stats.rx.ofdm;
91 cck = &il->_4965.stats.rx.cck;
92 general = &il->_4965.stats.rx.general;
93 ht = &il->_4965.stats.rx.ofdm_ht;
94 accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
95 accum_cck = &il->_4965.accum_stats.rx.cck;
96 accum_general = &il->_4965.accum_stats.rx.general;
97 accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
98 delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
99 delta_cck = &il->_4965.delta_stats.rx.cck;
100 delta_general = &il->_4965.delta_stats.rx.general;
101 delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
102 max_ofdm = &il->_4965.max_delta.rx.ofdm;
103 max_cck = &il->_4965.max_delta.rx.cck;
104 max_general = &il->_4965.max_delta.rx.general;
105 max_ht = &il->_4965.max_delta.rx.ofdm_ht;
106
107 pos += il4965_stats_flag(il, buf, bufsz);
108 pos +=
109 scnprintf(buf + pos, bufsz - pos, fmt_header,
110 "Statistics_Rx - OFDM:");
111 pos +=
112 scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
113 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
114 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
115 pos +=
116 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
117 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
118 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
119 pos +=
120 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
121 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
122 delta_ofdm->plcp_err, max_ofdm->plcp_err);
123 pos +=
124 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
125 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
126 delta_ofdm->crc32_err, max_ofdm->crc32_err);
127 pos +=
128 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
129 le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
130 delta_ofdm->overrun_err, max_ofdm->overrun_err);
131 pos +=
132 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
133 le32_to_cpu(ofdm->early_overrun_err),
134 accum_ofdm->early_overrun_err,
135 delta_ofdm->early_overrun_err,
136 max_ofdm->early_overrun_err);
137 pos +=
138 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
139 le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
140 delta_ofdm->crc32_good, max_ofdm->crc32_good);
141 pos +=
142 scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
143 le32_to_cpu(ofdm->false_alarm_cnt),
144 accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos +=
147 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos +=
153 scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
155 delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
156 pos +=
157 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
159 delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
160 pos +=
161 scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
162 le32_to_cpu(ofdm->unresponded_rts),
163 accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
164 max_ofdm->unresponded_rts);
165 pos +=
166 scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
167 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
168 accum_ofdm->rxe_frame_limit_overrun,
169 delta_ofdm->rxe_frame_limit_overrun,
170 max_ofdm->rxe_frame_limit_overrun);
171 pos +=
172 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
173 le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
174 delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
175 pos +=
176 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
177 le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
178 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
179 pos +=
180 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
181 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
182 accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
183 max_ofdm->sent_ba_rsp_cnt);
184 pos +=
185 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
186 le32_to_cpu(ofdm->dsp_self_kill),
187 accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
188 max_ofdm->dsp_self_kill);
189 pos +=
190 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
191 le32_to_cpu(ofdm->mh_format_err),
192 accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
193 max_ofdm->mh_format_err);
194 pos +=
195 scnprintf(buf + pos, bufsz - pos, fmt_table,
196 "re_acq_main_rssi_sum:",
197 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
198 accum_ofdm->re_acq_main_rssi_sum,
199 delta_ofdm->re_acq_main_rssi_sum,
200 max_ofdm->re_acq_main_rssi_sum);
201
202 pos +=
203 scnprintf(buf + pos, bufsz - pos, fmt_header,
204 "Statistics_Rx - CCK:");
205 pos +=
206 scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
207 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
208 delta_cck->ina_cnt, max_cck->ina_cnt);
209 pos +=
210 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
211 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
212 delta_cck->fina_cnt, max_cck->fina_cnt);
213 pos +=
214 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
215 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
216 delta_cck->plcp_err, max_cck->plcp_err);
217 pos +=
218 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
219 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
220 delta_cck->crc32_err, max_cck->crc32_err);
221 pos +=
222 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
223 le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
224 delta_cck->overrun_err, max_cck->overrun_err);
225 pos +=
226 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
227 le32_to_cpu(cck->early_overrun_err),
228 accum_cck->early_overrun_err,
229 delta_cck->early_overrun_err, max_cck->early_overrun_err);
230 pos +=
231 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
232 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
233 delta_cck->crc32_good, max_cck->crc32_good);
234 pos +=
235 scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
236 le32_to_cpu(cck->false_alarm_cnt),
237 accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
238 max_cck->false_alarm_cnt);
239 pos +=
240 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
241 le32_to_cpu(cck->fina_sync_err_cnt),
242 accum_cck->fina_sync_err_cnt,
243 delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
244 pos +=
245 scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
246 le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
247 delta_cck->sfd_timeout, max_cck->sfd_timeout);
248 pos +=
249 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
250 le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
251 delta_cck->fina_timeout, max_cck->fina_timeout);
252 pos +=
253 scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
254 le32_to_cpu(cck->unresponded_rts),
255 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
256 max_cck->unresponded_rts);
257 pos +=
258 scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
259 le32_to_cpu(cck->rxe_frame_limit_overrun),
260 accum_cck->rxe_frame_limit_overrun,
261 delta_cck->rxe_frame_limit_overrun,
262 max_cck->rxe_frame_limit_overrun);
263 pos +=
264 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
265 le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
266 delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
267 pos +=
268 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
269 le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
270 delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
271 pos +=
272 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
273 le32_to_cpu(cck->sent_ba_rsp_cnt),
274 accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
275 max_cck->sent_ba_rsp_cnt);
276 pos +=
277 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
278 le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
279 delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
280 pos +=
281 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
282 le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
283 delta_cck->mh_format_err, max_cck->mh_format_err);
284 pos +=
285 scnprintf(buf + pos, bufsz - pos, fmt_table,
286 "re_acq_main_rssi_sum:",
287 le32_to_cpu(cck->re_acq_main_rssi_sum),
288 accum_cck->re_acq_main_rssi_sum,
289 delta_cck->re_acq_main_rssi_sum,
290 max_cck->re_acq_main_rssi_sum);
291
292 pos +=
293 scnprintf(buf + pos, bufsz - pos, fmt_header,
294 "Statistics_Rx - GENERAL:");
295 pos +=
296 scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
297 le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
298 delta_general->bogus_cts, max_general->bogus_cts);
299 pos +=
300 scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
301 le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
302 delta_general->bogus_ack, max_general->bogus_ack);
303 pos +=
304 scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
305 le32_to_cpu(general->non_bssid_frames),
306 accum_general->non_bssid_frames,
307 delta_general->non_bssid_frames,
308 max_general->non_bssid_frames);
309 pos +=
310 scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
311 le32_to_cpu(general->filtered_frames),
312 accum_general->filtered_frames,
313 delta_general->filtered_frames,
314 max_general->filtered_frames);
315 pos +=
316 scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
317 le32_to_cpu(general->non_channel_beacons),
318 accum_general->non_channel_beacons,
319 delta_general->non_channel_beacons,
320 max_general->non_channel_beacons);
321 pos +=
322 scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
323 le32_to_cpu(general->channel_beacons),
324 accum_general->channel_beacons,
325 delta_general->channel_beacons,
326 max_general->channel_beacons);
327 pos +=
328 scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
329 le32_to_cpu(general->num_missed_bcon),
330 accum_general->num_missed_bcon,
331 delta_general->num_missed_bcon,
332 max_general->num_missed_bcon);
333 pos +=
334 scnprintf(buf + pos, bufsz - pos, fmt_table,
335 "adc_rx_saturation_time:",
336 le32_to_cpu(general->adc_rx_saturation_time),
337 accum_general->adc_rx_saturation_time,
338 delta_general->adc_rx_saturation_time,
339 max_general->adc_rx_saturation_time);
340 pos +=
341 scnprintf(buf + pos, bufsz - pos, fmt_table,
342 "ina_detect_search_tm:",
343 le32_to_cpu(general->ina_detection_search_time),
344 accum_general->ina_detection_search_time,
345 delta_general->ina_detection_search_time,
346 max_general->ina_detection_search_time);
347 pos +=
348 scnprintf(buf + pos, bufsz - pos, fmt_table,
349 "beacon_silence_rssi_a:",
350 le32_to_cpu(general->beacon_silence_rssi_a),
351 accum_general->beacon_silence_rssi_a,
352 delta_general->beacon_silence_rssi_a,
353 max_general->beacon_silence_rssi_a);
354 pos +=
355 scnprintf(buf + pos, bufsz - pos, fmt_table,
356 "beacon_silence_rssi_b:",
357 le32_to_cpu(general->beacon_silence_rssi_b),
358 accum_general->beacon_silence_rssi_b,
359 delta_general->beacon_silence_rssi_b,
360 max_general->beacon_silence_rssi_b);
361 pos +=
362 scnprintf(buf + pos, bufsz - pos, fmt_table,
363 "beacon_silence_rssi_c:",
364 le32_to_cpu(general->beacon_silence_rssi_c),
365 accum_general->beacon_silence_rssi_c,
366 delta_general->beacon_silence_rssi_c,
367 max_general->beacon_silence_rssi_c);
368 pos +=
369 scnprintf(buf + pos, bufsz - pos, fmt_table,
370 "interference_data_flag:",
371 le32_to_cpu(general->interference_data_flag),
372 accum_general->interference_data_flag,
373 delta_general->interference_data_flag,
374 max_general->interference_data_flag);
375 pos +=
376 scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
377 le32_to_cpu(general->channel_load),
378 accum_general->channel_load, delta_general->channel_load,
379 max_general->channel_load);
380 pos +=
381 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
382 le32_to_cpu(general->dsp_false_alarms),
383 accum_general->dsp_false_alarms,
384 delta_general->dsp_false_alarms,
385 max_general->dsp_false_alarms);
386 pos +=
387 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
388 le32_to_cpu(general->beacon_rssi_a),
389 accum_general->beacon_rssi_a,
390 delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
391 pos +=
392 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
393 le32_to_cpu(general->beacon_rssi_b),
394 accum_general->beacon_rssi_b,
395 delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
396 pos +=
397 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
398 le32_to_cpu(general->beacon_rssi_c),
399 accum_general->beacon_rssi_c,
400 delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
401 pos +=
402 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
403 le32_to_cpu(general->beacon_energy_a),
404 accum_general->beacon_energy_a,
405 delta_general->beacon_energy_a,
406 max_general->beacon_energy_a);
407 pos +=
408 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
409 le32_to_cpu(general->beacon_energy_b),
410 accum_general->beacon_energy_b,
411 delta_general->beacon_energy_b,
412 max_general->beacon_energy_b);
413 pos +=
414 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
415 le32_to_cpu(general->beacon_energy_c),
416 accum_general->beacon_energy_c,
417 delta_general->beacon_energy_c,
418 max_general->beacon_energy_c);
419
420 pos +=
421 scnprintf(buf + pos, bufsz - pos, fmt_header,
422 "Statistics_Rx - OFDM_HT:");
423 pos +=
424 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
425 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
426 delta_ht->plcp_err, max_ht->plcp_err);
427 pos +=
428 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
429 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
430 delta_ht->overrun_err, max_ht->overrun_err);
431 pos +=
432 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
433 le32_to_cpu(ht->early_overrun_err),
434 accum_ht->early_overrun_err, delta_ht->early_overrun_err,
435 max_ht->early_overrun_err);
436 pos +=
437 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
438 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
439 delta_ht->crc32_good, max_ht->crc32_good);
440 pos +=
441 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
442 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
443 delta_ht->crc32_err, max_ht->crc32_err);
444 pos +=
445 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
446 le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
447 delta_ht->mh_format_err, max_ht->mh_format_err);
448 pos +=
449 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
450 le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
451 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
452 pos +=
453 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
454 le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
455 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
456 pos +=
457 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
458 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
459 delta_ht->agg_cnt, max_ht->agg_cnt);
460 pos +=
461 scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
462 le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
463 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
464
465 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
466 kfree(buf);
467 return ret;
468}
469
470static ssize_t
471il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
472 size_t count, loff_t *ppos)
473{
474 struct il_priv *il = file->private_data;
475 int pos = 0;
476 char *buf;
477 int bufsz = (sizeof(struct stats_tx) * 48) + 250;
478 ssize_t ret;
479 struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
480
481 if (!il_is_alive(il))
482 return -EAGAIN;
483
484 buf = kzalloc(bufsz, GFP_KERNEL);
485 if (!buf) {
486 IL_ERR("Can not allocate Buffer\n");
487 return -ENOMEM;
488 }
489
490 /* the statistic information displayed here is based on
491 * the last stats notification from uCode and
492 * might not reflect the current uCode activity
493 */
494 tx = &il->_4965.stats.tx;
495 accum_tx = &il->_4965.accum_stats.tx;
496 delta_tx = &il->_4965.delta_stats.tx;
497 max_tx = &il->_4965.max_delta.tx;
498
499 pos += il4965_stats_flag(il, buf, bufsz);
500 pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
501 pos +=
502 scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
503 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
504 delta_tx->preamble_cnt, max_tx->preamble_cnt);
505 pos +=
506 scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
507 le32_to_cpu(tx->rx_detected_cnt),
508 accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
509 max_tx->rx_detected_cnt);
510 pos +=
511 scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
512 le32_to_cpu(tx->bt_prio_defer_cnt),
513 accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
514 max_tx->bt_prio_defer_cnt);
515 pos +=
516 scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
517 le32_to_cpu(tx->bt_prio_kill_cnt),
518 accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
519 max_tx->bt_prio_kill_cnt);
520 pos +=
521 scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
522 le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
523 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
524 pos +=
525 scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
526 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
527 delta_tx->cts_timeout, max_tx->cts_timeout);
528 pos +=
529 scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
530 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
531 delta_tx->ack_timeout, max_tx->ack_timeout);
532 pos +=
533 scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
534 le32_to_cpu(tx->expected_ack_cnt),
535 accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
536 max_tx->expected_ack_cnt);
537 pos +=
538 scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
539 le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
540 delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
541 pos +=
542 scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
543 le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
544 delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
545 pos +=
546 scnprintf(buf + pos, bufsz - pos, fmt_table,
547 "abort_nxt_frame_mismatch:",
548 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
549 accum_tx->burst_abort_next_frame_mismatch_cnt,
550 delta_tx->burst_abort_next_frame_mismatch_cnt,
551 max_tx->burst_abort_next_frame_mismatch_cnt);
552 pos +=
553 scnprintf(buf + pos, bufsz - pos, fmt_table,
554 "abort_missing_nxt_frame:",
555 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
556 accum_tx->burst_abort_missing_next_frame_cnt,
557 delta_tx->burst_abort_missing_next_frame_cnt,
558 max_tx->burst_abort_missing_next_frame_cnt);
559 pos +=
560 scnprintf(buf + pos, bufsz - pos, fmt_table,
561 "cts_timeout_collision:",
562 le32_to_cpu(tx->cts_timeout_collision),
563 accum_tx->cts_timeout_collision,
564 delta_tx->cts_timeout_collision,
565 max_tx->cts_timeout_collision);
566 pos +=
567 scnprintf(buf + pos, bufsz - pos, fmt_table,
568 "ack_ba_timeout_collision:",
569 le32_to_cpu(tx->ack_or_ba_timeout_collision),
570 accum_tx->ack_or_ba_timeout_collision,
571 delta_tx->ack_or_ba_timeout_collision,
572 max_tx->ack_or_ba_timeout_collision);
573 pos +=
574 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
575 le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
576 delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
577 pos +=
578 scnprintf(buf + pos, bufsz - pos, fmt_table,
579 "agg ba_resched_frames:",
580 le32_to_cpu(tx->agg.ba_reschedule_frames),
581 accum_tx->agg.ba_reschedule_frames,
582 delta_tx->agg.ba_reschedule_frames,
583 max_tx->agg.ba_reschedule_frames);
584 pos +=
585 scnprintf(buf + pos, bufsz - pos, fmt_table,
586 "agg scd_query_agg_frame:",
587 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
588 accum_tx->agg.scd_query_agg_frame_cnt,
589 delta_tx->agg.scd_query_agg_frame_cnt,
590 max_tx->agg.scd_query_agg_frame_cnt);
591 pos +=
592 scnprintf(buf + pos, bufsz - pos, fmt_table,
593 "agg scd_query_no_agg:",
594 le32_to_cpu(tx->agg.scd_query_no_agg),
595 accum_tx->agg.scd_query_no_agg,
596 delta_tx->agg.scd_query_no_agg,
597 max_tx->agg.scd_query_no_agg);
598 pos +=
599 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
600 le32_to_cpu(tx->agg.scd_query_agg),
601 accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
602 max_tx->agg.scd_query_agg);
603 pos +=
604 scnprintf(buf + pos, bufsz - pos, fmt_table,
605 "agg scd_query_mismatch:",
606 le32_to_cpu(tx->agg.scd_query_mismatch),
607 accum_tx->agg.scd_query_mismatch,
608 delta_tx->agg.scd_query_mismatch,
609 max_tx->agg.scd_query_mismatch);
610 pos +=
611 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
612 le32_to_cpu(tx->agg.frame_not_ready),
613 accum_tx->agg.frame_not_ready,
614 delta_tx->agg.frame_not_ready,
615 max_tx->agg.frame_not_ready);
616 pos +=
617 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
618 le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
619 delta_tx->agg.underrun, max_tx->agg.underrun);
620 pos +=
621 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
622 le32_to_cpu(tx->agg.bt_prio_kill),
623 accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
624 max_tx->agg.bt_prio_kill);
625 pos +=
626 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
627 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
628 accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
629 max_tx->agg.rx_ba_rsp_cnt);
630
631 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
632 kfree(buf);
633 return ret;
634}
635
636static ssize_t
637il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
638 size_t count, loff_t *ppos)
639{
640 struct il_priv *il = file->private_data;
641 int pos = 0;
642 char *buf;
643 int bufsz = sizeof(struct stats_general) * 10 + 300;
644 ssize_t ret;
645 struct stats_general_common *general, *accum_general;
646 struct stats_general_common *delta_general, *max_general;
647 struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
648 struct stats_div *div, *accum_div, *delta_div, *max_div;
649
650 if (!il_is_alive(il))
651 return -EAGAIN;
652
653 buf = kzalloc(bufsz, GFP_KERNEL);
654 if (!buf) {
655 IL_ERR("Can not allocate Buffer\n");
656 return -ENOMEM;
657 }
658
659 /* the statistic information displayed here is based on
660 * the last stats notification from uCode and
661 * might not reflect the current uCode activity
662 */
663 general = &il->_4965.stats.general.common;
664 dbg = &il->_4965.stats.general.common.dbg;
665 div = &il->_4965.stats.general.common.div;
666 accum_general = &il->_4965.accum_stats.general.common;
667 accum_dbg = &il->_4965.accum_stats.general.common.dbg;
668 accum_div = &il->_4965.accum_stats.general.common.div;
669 delta_general = &il->_4965.delta_stats.general.common;
670 max_general = &il->_4965.max_delta.general.common;
671 delta_dbg = &il->_4965.delta_stats.general.common.dbg;
672 max_dbg = &il->_4965.max_delta.general.common.dbg;
673 delta_div = &il->_4965.delta_stats.general.common.div;
674 max_div = &il->_4965.max_delta.general.common.div;
675
676 pos += il4965_stats_flag(il, buf, bufsz);
677 pos +=
678 scnprintf(buf + pos, bufsz - pos, fmt_header,
679 "Statistics_General:");
680 pos +=
681 scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
682 le32_to_cpu(general->temperature));
683 pos +=
684 scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
685 le32_to_cpu(general->ttl_timestamp));
686 pos +=
687 scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
688 le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
689 delta_dbg->burst_check, max_dbg->burst_check);
690 pos +=
691 scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
692 le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
693 delta_dbg->burst_count, max_dbg->burst_count);
694 pos +=
695 scnprintf(buf + pos, bufsz - pos, fmt_table,
696 "wait_for_silence_timeout_count:",
697 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
698 accum_dbg->wait_for_silence_timeout_cnt,
699 delta_dbg->wait_for_silence_timeout_cnt,
700 max_dbg->wait_for_silence_timeout_cnt);
701 pos +=
702 scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
703 le32_to_cpu(general->sleep_time),
704 accum_general->sleep_time, delta_general->sleep_time,
705 max_general->sleep_time);
706 pos +=
707 scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
708 le32_to_cpu(general->slots_out), accum_general->slots_out,
709 delta_general->slots_out, max_general->slots_out);
710 pos +=
711 scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
712 le32_to_cpu(general->slots_idle),
713 accum_general->slots_idle, delta_general->slots_idle,
714 max_general->slots_idle);
715 pos +=
716 scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
717 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
718 delta_div->tx_on_a, max_div->tx_on_a);
719 pos +=
720 scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
721 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
722 delta_div->tx_on_b, max_div->tx_on_b);
723 pos +=
724 scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
725 le32_to_cpu(div->exec_time), accum_div->exec_time,
726 delta_div->exec_time, max_div->exec_time);
727 pos +=
728 scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
729 le32_to_cpu(div->probe_time), accum_div->probe_time,
730 delta_div->probe_time, max_div->probe_time);
731 pos +=
732 scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
733 le32_to_cpu(general->rx_enable_counter),
734 accum_general->rx_enable_counter,
735 delta_general->rx_enable_counter,
736 max_general->rx_enable_counter);
737 pos +=
738 scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
739 le32_to_cpu(general->num_of_sos_states),
740 accum_general->num_of_sos_states,
741 delta_general->num_of_sos_states,
742 max_general->num_of_sos_states);
743 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
744 kfree(buf);
745 return ret;
746}
747
748const struct il_debugfs_ops il4965_debugfs_ops = {
749 .rx_stats_read = il4965_ucode_rx_stats_read,
750 .tx_stats_read = il4965_ucode_tx_stats_read,
751 .general_stats_read = il4965_ucode_general_stats_read,
752};
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
new file mode 100644
index 000000000000..6656215a13a9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -0,0 +1,6868 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "common.h"
54#include "4965.h"
55
56/******************************************************************************
57 *
58 * module boilerplate
59 *
60 ******************************************************************************/
61
62/*
63 * module name, copyright, version, etc.
64 */
65#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
66
67#ifdef CONFIG_IWLEGACY_DEBUG
68#define VD "d"
69#else
70#define VD
71#endif
72
73#define DRV_VERSION IWLWIFI_VERSION VD
74
75MODULE_DESCRIPTION(DRV_DESCRIPTION);
76MODULE_VERSION(DRV_VERSION);
77MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
78MODULE_LICENSE("GPL");
79MODULE_ALIAS("iwl4965");
80
81void
82il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
83{
84 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
85 IL_ERR("Tx flush command to flush out all frames\n");
86 if (!test_bit(S_EXIT_PENDING, &il->status))
87 queue_work(il->workqueue, &il->tx_flush);
88 }
89}
90
91/*
92 * EEPROM
93 */
94struct il_mod_params il4965_mod_params = {
95 .restart_fw = 1,
96 /* the rest are 0 by default */
97};
98
99void
100il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
101{
102 unsigned long flags;
103 int i;
104 spin_lock_irqsave(&rxq->lock, flags);
105 INIT_LIST_HEAD(&rxq->rx_free);
106 INIT_LIST_HEAD(&rxq->rx_used);
107 /* Fill the rx_used queue with _all_ of the Rx buffers */
108 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
109 /* In the reset function, these buffers may have been allocated
110 * to an SKB, so we need to unmap and free potential storage */
111 if (rxq->pool[i].page != NULL) {
112 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
113 PAGE_SIZE << il->hw_params.rx_page_order,
114 PCI_DMA_FROMDEVICE);
115 __il_free_pages(il, rxq->pool[i].page);
116 rxq->pool[i].page = NULL;
117 }
118 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
119 }
120
121 for (i = 0; i < RX_QUEUE_SIZE; i++)
122 rxq->queue[i] = NULL;
123
124 /* Set us so that we have processed and used all buffers, but have
125 * not restocked the Rx queue with fresh buffers */
126 rxq->read = rxq->write = 0;
127 rxq->write_actual = 0;
128 rxq->free_count = 0;
129 spin_unlock_irqrestore(&rxq->lock, flags);
130}
131
132int
133il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
134{
135 u32 rb_size;
136 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
137 u32 rb_timeout = 0;
138
139 if (il->cfg->mod_params->amsdu_size_8K)
140 rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
141 else
142 rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
143
144 /* Stop Rx DMA */
145 il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
146
147 /* Reset driver's Rx queue write idx */
148 il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
149
150 /* Tell device where to find RBD circular buffer in DRAM */
151 il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
152
153 /* Tell device where in DRAM to update its Rx status */
154 il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
155
156 /* Enable Rx DMA
157 * Direct rx interrupts to hosts
158 * Rx buffer size 4 or 8k
159 * RB timeout 0x10
160 * 256 RBDs
161 */
162 il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
163 FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
164 FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
165 FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
166 rb_size |
167 (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
168 (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
169
170 /* Set interrupt coalescing timer to default (2048 usecs) */
171 il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
172
173 return 0;
174}
175
176static void
177il4965_set_pwr_vmain(struct il_priv *il)
178{
179/*
180 * (for documentation purposes)
181 * to set power to V_AUX, do:
182
183 if (pci_pme_capable(il->pci_dev, PCI_D3cold))
184 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
185 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
186 ~APMG_PS_CTRL_MSK_PWR_SRC);
187 */
188
189 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
190 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
191 ~APMG_PS_CTRL_MSK_PWR_SRC);
192}
193
194int
195il4965_hw_nic_init(struct il_priv *il)
196{
197 unsigned long flags;
198 struct il_rx_queue *rxq = &il->rxq;
199 int ret;
200
201 spin_lock_irqsave(&il->lock, flags);
202 il_apm_init(il);
203 /* Set interrupt coalescing calibration timer to default (512 usecs) */
204 il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
205 spin_unlock_irqrestore(&il->lock, flags);
206
207 il4965_set_pwr_vmain(il);
208 il4965_nic_config(il);
209
210 /* Allocate the RX queue, or reset if it is already allocated */
211 if (!rxq->bd) {
212 ret = il_rx_queue_alloc(il);
213 if (ret) {
214 IL_ERR("Unable to initialize Rx queue\n");
215 return -ENOMEM;
216 }
217 } else
218 il4965_rx_queue_reset(il, rxq);
219
220 il4965_rx_replenish(il);
221
222 il4965_rx_init(il, rxq);
223
224 spin_lock_irqsave(&il->lock, flags);
225
226 rxq->need_update = 1;
227 il_rx_queue_update_write_ptr(il, rxq);
228
229 spin_unlock_irqrestore(&il->lock, flags);
230
231 /* Allocate or reset and init all Tx and Command queues */
232 if (!il->txq) {
233 ret = il4965_txq_ctx_alloc(il);
234 if (ret)
235 return ret;
236 } else
237 il4965_txq_ctx_reset(il);
238
239 set_bit(S_INIT, &il->status);
240
241 return 0;
242}
243
244/**
245 * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
246 */
247static inline __le32
248il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
249{
250 return cpu_to_le32((u32) (dma_addr >> 8));
251}
252
253/**
254 * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
255 *
256 * If there are slots in the RX queue that need to be restocked,
257 * and we have free pre-allocated buffers, fill the ranks as much
258 * as we can, pulling from rx_free.
259 *
260 * This moves the 'write' idx forward to catch up with 'processed', and
261 * also updates the memory address in the firmware to reference the new
262 * target buffer.
263 */
264void
265il4965_rx_queue_restock(struct il_priv *il)
266{
267 struct il_rx_queue *rxq = &il->rxq;
268 struct list_head *element;
269 struct il_rx_buf *rxb;
270 unsigned long flags;
271
272 spin_lock_irqsave(&rxq->lock, flags);
273 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
274 /* The overwritten rxb must be a used one */
275 rxb = rxq->queue[rxq->write];
276 BUG_ON(rxb && rxb->page);
277
278 /* Get next free Rx buffer, remove from free list */
279 element = rxq->rx_free.next;
280 rxb = list_entry(element, struct il_rx_buf, list);
281 list_del(element);
282
283 /* Point to Rx buffer via next RBD in circular buffer */
284 rxq->bd[rxq->write] =
285 il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
286 rxq->queue[rxq->write] = rxb;
287 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
288 rxq->free_count--;
289 }
290 spin_unlock_irqrestore(&rxq->lock, flags);
291 /* If the pre-allocated buffer pool is dropping low, schedule to
292 * refill it */
293 if (rxq->free_count <= RX_LOW_WATERMARK)
294 queue_work(il->workqueue, &il->rx_replenish);
295
296 /* If we've added more space for the firmware to place data, tell it.
297 * Increment device's write pointer in multiples of 8. */
298 if (rxq->write_actual != (rxq->write & ~0x7)) {
299 spin_lock_irqsave(&rxq->lock, flags);
300 rxq->need_update = 1;
301 spin_unlock_irqrestore(&rxq->lock, flags);
302 il_rx_queue_update_write_ptr(il, rxq);
303 }
304}
305
306/**
307 * il4965_rx_replenish - Move all used packets from rx_used to rx_free
308 *
309 * When moving to rx_free, an SKB is allocated for the slot.
310 *
311 * Also restock the Rx queue via il_rx_queue_restock.
312 * This is called as a scheduled work item (except during initialization).
313 */
314static void
315il4965_rx_allocate(struct il_priv *il, gfp_t priority)
316{
317 struct il_rx_queue *rxq = &il->rxq;
318 struct list_head *element;
319 struct il_rx_buf *rxb;
320 struct page *page;
321 dma_addr_t page_dma;
322 unsigned long flags;
323 gfp_t gfp_mask = priority;
324
325 while (1) {
326 spin_lock_irqsave(&rxq->lock, flags);
327 if (list_empty(&rxq->rx_used)) {
328 spin_unlock_irqrestore(&rxq->lock, flags);
329 return;
330 }
331 spin_unlock_irqrestore(&rxq->lock, flags);
332
333 if (rxq->free_count > RX_LOW_WATERMARK)
334 gfp_mask |= __GFP_NOWARN;
335
336 if (il->hw_params.rx_page_order > 0)
337 gfp_mask |= __GFP_COMP;
338
339 /* Alloc a new receive buffer */
340 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
341 if (!page) {
342 if (net_ratelimit())
343 D_INFO("alloc_pages failed, " "order: %d\n",
344 il->hw_params.rx_page_order);
345
346 if (rxq->free_count <= RX_LOW_WATERMARK &&
347 net_ratelimit())
348 IL_ERR("Failed to alloc_pages with %s. "
349 "Only %u free buffers remaining.\n",
350 priority ==
351 GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
352 rxq->free_count);
353 /* We don't reschedule replenish work here -- we will
354 * call the restock method and if it still needs
355 * more buffers it will schedule replenish */
356 return;
357 }
358
359 /* Get physical address of the RB */
360 page_dma =
361 pci_map_page(il->pci_dev, page, 0,
362 PAGE_SIZE << il->hw_params.rx_page_order,
363 PCI_DMA_FROMDEVICE);
364 if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
365 __free_pages(page, il->hw_params.rx_page_order);
366 break;
367 }
368
369 spin_lock_irqsave(&rxq->lock, flags);
370
371 if (list_empty(&rxq->rx_used)) {
372 spin_unlock_irqrestore(&rxq->lock, flags);
373 pci_unmap_page(il->pci_dev, page_dma,
374 PAGE_SIZE << il->hw_params.rx_page_order,
375 PCI_DMA_FROMDEVICE);
376 __free_pages(page, il->hw_params.rx_page_order);
377 return;
378 }
379
380 element = rxq->rx_used.next;
381 rxb = list_entry(element, struct il_rx_buf, list);
382 list_del(element);
383
384 BUG_ON(rxb->page);
385
386 rxb->page = page;
387 rxb->page_dma = page_dma;
388 list_add_tail(&rxb->list, &rxq->rx_free);
389 rxq->free_count++;
390 il->alloc_rxb_page++;
391
392 spin_unlock_irqrestore(&rxq->lock, flags);
393 }
394}
395
396void
397il4965_rx_replenish(struct il_priv *il)
398{
399 unsigned long flags;
400
401 il4965_rx_allocate(il, GFP_KERNEL);
402
403 spin_lock_irqsave(&il->lock, flags);
404 il4965_rx_queue_restock(il);
405 spin_unlock_irqrestore(&il->lock, flags);
406}
407
408void
409il4965_rx_replenish_now(struct il_priv *il)
410{
411 il4965_rx_allocate(il, GFP_ATOMIC);
412
413 il4965_rx_queue_restock(il);
414}
415
416/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
417 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
418 * This free routine walks the list of POOL entries and, if the SKB is
419 * non-NULL, it is unmapped and freed.
420 */
421void
422il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
423{
424 int i;
425 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
426 if (rxq->pool[i].page != NULL) {
427 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
428 PAGE_SIZE << il->hw_params.rx_page_order,
429 PCI_DMA_FROMDEVICE);
430 __il_free_pages(il, rxq->pool[i].page);
431 rxq->pool[i].page = NULL;
432 }
433 }
434
435 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
436 rxq->bd_dma);
437 dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
438 rxq->rb_stts, rxq->rb_stts_dma);
439 rxq->bd = NULL;
440 rxq->rb_stts = NULL;
441}
442
443int
444il4965_rxq_stop(struct il_priv *il)
445{
446 int ret;
447
448 _il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
449 ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
450 FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
451 FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
452 1000);
453 if (ret < 0)
454 IL_ERR("Can't stop Rx DMA.\n");
455
456 return 0;
457}
458
459int
460il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
461{
462 int idx = 0;
463 int band_offset = 0;
464
465 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
466 if (rate_n_flags & RATE_MCS_HT_MSK) {
467 idx = (rate_n_flags & 0xff);
468 return idx;
469 /* Legacy rate format, search for match in table */
470 } else {
471 if (band == IEEE80211_BAND_5GHZ)
472 band_offset = IL_FIRST_OFDM_RATE;
473 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
474 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
475 return idx - band_offset;
476 }
477
478 return -1;
479}
480
481static int
482il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
483{
484 /* data from PHY/DSP regarding signal strength, etc.,
485 * contents are always there, not configurable by host. */
486 struct il4965_rx_non_cfg_phy *ncphy =
487 (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
488 u32 agc =
489 (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
490 IL49_AGC_DB_POS;
491
492 u32 valid_antennae =
493 (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
494 >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
495 u8 max_rssi = 0;
496 u32 i;
497
498 /* Find max rssi among 3 possible receivers.
499 * These values are measured by the digital signal processor (DSP).
500 * They should stay fairly constant even as the signal strength varies,
501 * if the radio's automatic gain control (AGC) is working right.
502 * AGC value (see below) will provide the "interesting" info. */
503 for (i = 0; i < 3; i++)
504 if (valid_antennae & (1 << i))
505 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
506
507 D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
508 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
509 max_rssi, agc);
510
511 /* dBm = max_rssi dB - agc dB - constant.
512 * Higher AGC (higher radio gain) means lower signal. */
513 return max_rssi - agc - IL4965_RSSI_OFFSET;
514}
515
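A minimal standalone sketch of the dBm conversion performed in il4965_calc_rssi() above: the strongest per-chain DSP RSSI has the AGC gain and a fixed calibration constant subtracted from it. RSSI_OFFSET below is an assumed placeholder standing in for IL4965_RSSI_OFFSET, and the sample numbers are made up for illustration, not taken from hardware.

	#include <stdio.h>

	#define RSSI_OFFSET 44	/* assumed placeholder, not the driver's constant */

	/* Mirror of the conversion in il4965_calc_rssi(): higher AGC gain
	 * means the radio amplified a weaker signal, so it is subtracted. */
	static int calc_signal_dbm(int max_rssi, int agc)
	{
		return max_rssi - agc - RSSI_OFFSET;
	}

	int main(void)
	{
		/* e.g. max_rssi 50, agc 48 -> 50 - 48 - 44 = -42 dBm */
		printf("%d dBm\n", calc_signal_dbm(50, 48));
		return 0;
	}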
516static u32
517il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
518{
519 u32 decrypt_out = 0;
520
521 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
522 RX_RES_STATUS_STATION_FOUND)
523 decrypt_out |=
524 (RX_RES_STATUS_STATION_FOUND |
525 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
526
527 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
528
529 /* packet was not encrypted */
530 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
531 RX_RES_STATUS_SEC_TYPE_NONE)
532 return decrypt_out;
533
534 /* packet was encrypted with unknown alg */
535 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
536 RX_RES_STATUS_SEC_TYPE_ERR)
537 return decrypt_out;
538
539 /* decryption was not done in HW */
540 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
541 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
542 return decrypt_out;
543
544 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
545
546 case RX_RES_STATUS_SEC_TYPE_CCMP:
547 /* alg is CCM: check MIC only */
548 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
549 /* Bad MIC */
550 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
551 else
552 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
553
554 break;
555
556 case RX_RES_STATUS_SEC_TYPE_TKIP:
557 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
558 /* Bad TTAK */
559 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
560 break;
561 }
562 /* fall through if TTAK OK */
563 default:
564 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
565 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
566 else
567 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
568 break;
569 }
570
571 D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
572
573 return decrypt_out;
574}
575
576#define SMALL_PACKET_SIZE 256
577
578static void
579il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
580 u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
581 struct ieee80211_rx_status *stats)
582{
583 struct sk_buff *skb;
584 __le16 fc = hdr->frame_control;
585
586 /* We only process data packets if the interface is open */
587 if (unlikely(!il->is_open)) {
588 D_DROP("Dropping packet while interface is not open.\n");
589 return;
590 }
591
592 if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
593 il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
594 D_INFO("Woke queues - frame received on passive channel\n");
595 }
596
597 /* In case of HW accelerated crypto and bad decryption, drop */
598 if (!il->cfg->mod_params->sw_crypto &&
599 il_set_decrypted_flag(il, hdr, ampdu_status, stats))
600 return;
601
602 skb = dev_alloc_skb(SMALL_PACKET_SIZE);
603 if (!skb) {
604 IL_ERR("dev_alloc_skb failed\n");
605 return;
606 }
607
608 if (len <= SMALL_PACKET_SIZE) {
609 memcpy(skb_put(skb, len), hdr, len);
610 } else {
611 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
612 len, PAGE_SIZE << il->hw_params.rx_page_order);
613 il->alloc_rxb_page--;
614 rxb->page = NULL;
615 }
616
617 il_update_stats(il, false, fc, len);
618 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
619
620 ieee80211_rx(il->hw, skb);
621}
622
623/* Called for N_RX (legacy ABG frames), or
624 * N_RX_MPDU (HT high-throughput N frames). */
625static void
626il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
627{
628 struct ieee80211_hdr *header;
629 struct ieee80211_rx_status rx_status = {};
630 struct il_rx_pkt *pkt = rxb_addr(rxb);
631 struct il_rx_phy_res *phy_res;
632 __le32 rx_pkt_status;
633 struct il_rx_mpdu_res_start *amsdu;
634 u32 len;
635 u32 ampdu_status;
636 u32 rate_n_flags;
637
638 /**
639 * N_RX and N_RX_MPDU are handled differently.
640 * N_RX: physical layer info is in this buffer
641 * N_RX_MPDU: physical layer info was sent in separate
642 * command and cached in il->last_phy_res
643 *
644 * Here we set up local variables depending on which command is
645 * received.
646 */
647 if (pkt->hdr.cmd == N_RX) {
648 phy_res = (struct il_rx_phy_res *)pkt->u.raw;
649 header =
650 (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
651 phy_res->cfg_phy_cnt);
652
653 len = le16_to_cpu(phy_res->byte_count);
654 rx_pkt_status =
655 *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
656 phy_res->cfg_phy_cnt + len);
657 ampdu_status = le32_to_cpu(rx_pkt_status);
658 } else {
659 if (!il->_4965.last_phy_res_valid) {
660 IL_ERR("MPDU frame without cached PHY data\n");
661 return;
662 }
663 phy_res = &il->_4965.last_phy_res;
664 amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
665 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
666 len = le16_to_cpu(amsdu->byte_count);
667 rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
668 ampdu_status =
669 il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
670 }
671
672 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
673 D_DROP("dsp size out of range [0,20]: %d\n",
674 phy_res->cfg_phy_cnt);
675 return;
676 }
677
678 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
679 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
680 D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
681 return;
682 }
683
684 /* This will be used in several places later */
685 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
686
687 /* rx_status carries information about the packet to mac80211 */
688 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
689 rx_status.band =
690 (phy_res->
691 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
692 IEEE80211_BAND_5GHZ;
693 rx_status.freq =
694 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
695 rx_status.band);
696 rx_status.rate_idx =
697 il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
698 rx_status.flag = 0;
699
700	/* TSF isn't reliable. In order to allow a smooth user experience,
701	 * this W/A doesn't propagate it to mac80211 */
702 /*rx_status.flag |= RX_FLAG_MACTIME_START; */
703
704 il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
705
706 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
707 rx_status.signal = il4965_calc_rssi(il, phy_res);
708
709 D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
710 (unsigned long long)rx_status.mactime);
711
712 /*
713 * "antenna number"
714 *
715 * It seems that the antenna field in the phy flags value
716 * is actually a bit field. This is undefined by radiotap,
717 * it wants an actual antenna number but I always get "7"
718 * for most legacy frames I receive indicating that the
719 * same frame was received on all three RX chains.
720 *
721 * I think this field should be removed in favor of a
722 * new 802.11n radiotap field "RX chains" that is defined
723 * as a bitmask.
724 */
725 rx_status.antenna =
726 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
727 RX_RES_PHY_FLAGS_ANTENNA_POS;
728
729 /* set the preamble flag if appropriate */
730 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
731 rx_status.flag |= RX_FLAG_SHORTPRE;
732
733 /* Set up the HT phy flags */
734 if (rate_n_flags & RATE_MCS_HT_MSK)
735 rx_status.flag |= RX_FLAG_HT;
736 if (rate_n_flags & RATE_MCS_HT40_MSK)
737 rx_status.flag |= RX_FLAG_40MHZ;
738 if (rate_n_flags & RATE_MCS_SGI_MSK)
739 rx_status.flag |= RX_FLAG_SHORT_GI;
740
741 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
742 /* We know which subframes of an A-MPDU belong
743 * together since we get a single PHY response
744 * from the firmware for all of them.
745 */
746
747 rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
748 rx_status.ampdu_reference = il->_4965.ampdu_ref;
749 }
750
751 il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
752 &rx_status);
753}
754
755/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
756 * This will be used later in il4965_hdl_rx() for N_RX_MPDU. */
757static void
758il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
759{
760 struct il_rx_pkt *pkt = rxb_addr(rxb);
761 il->_4965.last_phy_res_valid = true;
762 il->_4965.ampdu_ref++;
763 memcpy(&il->_4965.last_phy_res, pkt->u.raw,
764 sizeof(struct il_rx_phy_res));
765}
766
767static int
768il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
769 enum ieee80211_band band, u8 is_active,
770 u8 n_probes, struct il_scan_channel *scan_ch)
771{
772 struct ieee80211_channel *chan;
773 const struct ieee80211_supported_band *sband;
774 const struct il_channel_info *ch_info;
775 u16 passive_dwell = 0;
776 u16 active_dwell = 0;
777 int added, i;
778 u16 channel;
779
780 sband = il_get_hw_mode(il, band);
781 if (!sband)
782 return 0;
783
784 active_dwell = il_get_active_dwell_time(il, band, n_probes);
785 passive_dwell = il_get_passive_dwell_time(il, band, vif);
786
787 if (passive_dwell <= active_dwell)
788 passive_dwell = active_dwell + 1;
789
790 for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
791 chan = il->scan_request->channels[i];
792
793 if (chan->band != band)
794 continue;
795
796 channel = chan->hw_value;
797 scan_ch->channel = cpu_to_le16(channel);
798
799 ch_info = il_get_channel_info(il, band, channel);
800 if (!il_is_channel_valid(ch_info)) {
801 D_SCAN("Channel %d is INVALID for this band.\n",
802 channel);
803 continue;
804 }
805
806 if (!is_active || il_is_channel_passive(ch_info) ||
807 (chan->flags & IEEE80211_CHAN_NO_IR))
808 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
809 else
810 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
811
812 if (n_probes)
813 scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
814
815 scan_ch->active_dwell = cpu_to_le16(active_dwell);
816 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
817
818 /* Set txpower levels to defaults */
819 scan_ch->dsp_atten = 110;
820
821 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
822 * power level:
823 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
824 */
825 if (band == IEEE80211_BAND_5GHZ)
826 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
827 else
828 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
829
830 D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
831 le32_to_cpu(scan_ch->type),
832 (scan_ch->
833 type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
834 (scan_ch->
835 type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
836 passive_dwell);
837
838 scan_ch++;
839 added++;
840 }
841
842 D_SCAN("total channels to scan %d\n", added);
843 return added;
844}
845
846static void
847il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
848{
849 int i;
850 u8 ind = *ant;
851
852 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
853 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
854 if (valid & BIT(ind)) {
855 *ant = ind;
856 return;
857 }
858 }
859}
860
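A standalone sketch of the round-robin antenna selection implemented in il4965_toggle_tx_ant() above: starting from the current index, advance modulo the antenna count until an antenna whose bit is set in the valid mask is found; if none is found, the index is left unchanged. ANT_NUM below is an assumed stand-in for RATE_ANT_NUM.

	#include <stdio.h>

	#define ANT_NUM 3	/* assumed stand-in for RATE_ANT_NUM */

	/* Advance *ant round-robin to the next antenna whose bit is set in
	 * valid; if no valid antenna is found, *ant is left unchanged. */
	static void toggle_tx_ant(unsigned char *ant, unsigned char valid)
	{
		unsigned char ind = *ant;
		int i;

		for (i = 0; i < ANT_NUM - 1; i++) {
			ind = (ind + 1) < ANT_NUM ? ind + 1 : 0;
			if (valid & (1 << ind)) {
				*ant = ind;
				return;
			}
		}
	}

	int main(void)
	{
		unsigned char ant = 0;

		toggle_tx_ant(&ant, 0x5);	/* antennas 0 and 2 marked valid */
		printf("next antenna: %d\n", ant);	/* prints 2 */
		return 0;
	}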
861int
862il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
863{
864 struct il_host_cmd cmd = {
865 .id = C_SCAN,
866 .len = sizeof(struct il_scan_cmd),
867 .flags = CMD_SIZE_HUGE,
868 };
869 struct il_scan_cmd *scan;
870 u32 rate_flags = 0;
871 u16 cmd_len;
872 u16 rx_chain = 0;
873 enum ieee80211_band band;
874 u8 n_probes = 0;
875 u8 rx_ant = il->hw_params.valid_rx_ant;
876 u8 rate;
877 bool is_active = false;
878 int chan_mod;
879 u8 active_chains;
880 u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
881 int ret;
882
883 lockdep_assert_held(&il->mutex);
884
885 if (!il->scan_cmd) {
886 il->scan_cmd =
887 kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
888 GFP_KERNEL);
889 if (!il->scan_cmd) {
890 D_SCAN("fail to allocate memory for scan\n");
891 return -ENOMEM;
892 }
893 }
894 scan = il->scan_cmd;
895 memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
896
897 scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
898 scan->quiet_time = IL_ACTIVE_QUIET_TIME;
899
900 if (il_is_any_associated(il)) {
901 u16 interval;
902 u32 extra;
903 u32 suspend_time = 100;
904 u32 scan_suspend_time = 100;
905
906 D_INFO("Scanning while associated...\n");
907 interval = vif->bss_conf.beacon_int;
908
909 scan->suspend_time = 0;
910 scan->max_out_time = cpu_to_le32(200 * 1024);
911 if (!interval)
912 interval = suspend_time;
913
914 extra = (suspend_time / interval) << 22;
915 scan_suspend_time =
916 (extra | ((suspend_time % interval) * 1024));
917 scan->suspend_time = cpu_to_le32(scan_suspend_time);
918 D_SCAN("suspend_time 0x%X beacon interval %d\n",
919 scan_suspend_time, interval);
920 }
921
922 if (il->scan_request->n_ssids) {
923 int i, p = 0;
924 D_SCAN("Kicking off active scan\n");
925 for (i = 0; i < il->scan_request->n_ssids; i++) {
926 /* always does wildcard anyway */
927 if (!il->scan_request->ssids[i].ssid_len)
928 continue;
929 scan->direct_scan[p].id = WLAN_EID_SSID;
930 scan->direct_scan[p].len =
931 il->scan_request->ssids[i].ssid_len;
932 memcpy(scan->direct_scan[p].ssid,
933 il->scan_request->ssids[i].ssid,
934 il->scan_request->ssids[i].ssid_len);
935 n_probes++;
936 p++;
937 }
938 is_active = true;
939 } else
940 D_SCAN("Start passive scan.\n");
941
942 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
943 scan->tx_cmd.sta_id = il->hw_params.bcast_id;
944 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
945
946 switch (il->scan_band) {
947 case IEEE80211_BAND_2GHZ:
948 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
949 chan_mod =
950 le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
951 RXON_FLG_CHANNEL_MODE_POS;
952 if (chan_mod == CHANNEL_MODE_PURE_40) {
953 rate = RATE_6M_PLCP;
954 } else {
955 rate = RATE_1M_PLCP;
956 rate_flags = RATE_MCS_CCK_MSK;
957 }
958 break;
959 case IEEE80211_BAND_5GHZ:
960 rate = RATE_6M_PLCP;
961 break;
962 default:
963 IL_WARN("Invalid scan band\n");
964 return -EIO;
965 }
966
967 /*
968 * If active scanning is requested but a certain channel is
969 * marked passive, we can do active scanning if we detect
970 * transmissions.
971 *
972 * There is an issue with some firmware versions that triggers
973 * a sysassert on a "good CRC threshold" of zero (== disabled),
974 * on a radar channel even though this means that we should NOT
975 * send probes.
976 *
977 * The "good CRC threshold" is the number of frames that we
978 * need to receive during our dwell time on a channel before
979 * sending out probes -- setting this to a huge value will
980 * mean we never reach it, but at the same time work around
981 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
982 * here instead of IL_GOOD_CRC_TH_DISABLED.
983 */
984 scan->good_CRC_th =
985 is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
986
987 band = il->scan_band;
988
989 if (il->cfg->scan_rx_antennas[band])
990 rx_ant = il->cfg->scan_rx_antennas[band];
991
992 il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
993 rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
994 scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);
995
996 /* In power save mode use one chain, otherwise use all chains */
997 if (test_bit(S_POWER_PMI, &il->status)) {
998 /* rx_ant has been set to all valid chains previously */
999 active_chains =
1000 rx_ant & ((u8) (il->chain_noise_data.active_chains));
1001 if (!active_chains)
1002 active_chains = rx_ant;
1003
1004 D_SCAN("chain_noise_data.active_chains: %u\n",
1005 il->chain_noise_data.active_chains);
1006
1007 rx_ant = il4965_first_antenna(active_chains);
1008 }
1009
1010 /* MIMO is not used here, but value is required */
1011 rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
1012 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
1013 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
1014 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
1015 scan->rx_chain = cpu_to_le16(rx_chain);
1016
1017 cmd_len =
1018 il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
1019 vif->addr, il->scan_request->ie,
1020 il->scan_request->ie_len,
1021 IL_MAX_SCAN_SIZE - sizeof(*scan));
1022 scan->tx_cmd.len = cpu_to_le16(cmd_len);
1023
1024 scan->filter_flags |=
1025 (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
1026
1027 scan->channel_count =
1028 il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
1029 (void *)&scan->data[cmd_len]);
1030 if (scan->channel_count == 0) {
1031 D_SCAN("channel count %d\n", scan->channel_count);
1032 return -EIO;
1033 }
1034
1035 cmd.len +=
1036 le16_to_cpu(scan->tx_cmd.len) +
1037 scan->channel_count * sizeof(struct il_scan_channel);
1038 cmd.data = scan;
1039 scan->len = cpu_to_le16(cmd.len);
1040
1041 set_bit(S_SCAN_HW, &il->status);
1042
1043 ret = il_send_cmd_sync(il, &cmd);
1044 if (ret)
1045 clear_bit(S_SCAN_HW, &il->status);
1046
1047 return ret;
1048}
1049
1050int
1051il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
1052 bool add)
1053{
1054 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1055
1056 if (add)
1057 return il4965_add_bssid_station(il, vif->bss_conf.bssid,
1058 &vif_priv->ibss_bssid_sta_id);
1059 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1060 vif->bss_conf.bssid);
1061}
1062
1063void
1064il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
1065{
1066 lockdep_assert_held(&il->sta_lock);
1067
1068 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1069 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1070 else {
1071 D_TX("free more than tfds_in_queue (%u:%d)\n",
1072 il->stations[sta_id].tid[tid].tfds_in_queue, freed);
1073 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1074 }
1075}
1076
1077#define IL_TX_QUEUE_MSK 0xfffff
1078
1079static bool
1080il4965_is_single_rx_stream(struct il_priv *il)
1081{
1082 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1083 il->current_ht_config.single_chain_sufficient;
1084}
1085
1086#define IL_NUM_RX_CHAINS_MULTIPLE 3
1087#define IL_NUM_RX_CHAINS_SINGLE 2
1088#define IL_NUM_IDLE_CHAINS_DUAL 2
1089#define IL_NUM_IDLE_CHAINS_SINGLE 1
1090
1091/*
1092 * Determine how many receiver/antenna chains to use.
1093 *
1094 * More provides better reception via diversity. Fewer saves power
1095 * at the expense of throughput, but only when not in powersave to
1096 * start with.
1097 *
1098 * MIMO (dual stream) requires at least 2, but works better with 3.
1099 * This does not determine *which* chains to use, just how many.
1100 */
1101static int
1102il4965_get_active_rx_chain_count(struct il_priv *il)
1103{
1104 /* # of Rx chains to use when expecting MIMO. */
1105 if (il4965_is_single_rx_stream(il))
1106 return IL_NUM_RX_CHAINS_SINGLE;
1107 else
1108 return IL_NUM_RX_CHAINS_MULTIPLE;
1109}
1110
1111/*
1112 * When we are in power saving mode, unless the device supports spatial
1113 * multiplexing power save, use the active count for the rx chain count.
1114 */
1115static int
1116il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
1117{
1118 /* # Rx chains when idling, depending on SMPS mode */
1119 switch (il->current_ht_config.smps) {
1120 case IEEE80211_SMPS_STATIC:
1121 case IEEE80211_SMPS_DYNAMIC:
1122 return IL_NUM_IDLE_CHAINS_SINGLE;
1123 case IEEE80211_SMPS_OFF:
1124 return active_cnt;
1125 default:
1126 WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
1127 return active_cnt;
1128 }
1129}
1130
1131/* up to 4 chains */
1132static u8
1133il4965_count_chain_bitmap(u32 chain_bitmap)
1134{
1135 u8 res;
1136 res = (chain_bitmap & BIT(0)) >> 0;
1137 res += (chain_bitmap & BIT(1)) >> 1;
1138 res += (chain_bitmap & BIT(2)) >> 2;
1139 res += (chain_bitmap & BIT(3)) >> 3;
1140 return res;
1141}
1142
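For context, the helper above is simply a 4-bit population count over the chain bitmap: a value of 0x7 (chains A, B and C connected) yields 3, while 0x5 yields 2. The kernel's hweight8() computes the same quantity.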
1143/**
1144 * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1145 *
1146 * Selects how many and which Rx receivers/antennas/chains to use.
1147 * This should not be used for the scan command ... it puts data in the wrong place.
1148 */
1149void
1150il4965_set_rxon_chain(struct il_priv *il)
1151{
1152 bool is_single = il4965_is_single_rx_stream(il);
1153 bool is_cam = !test_bit(S_POWER_PMI, &il->status);
1154 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1155 u32 active_chains;
1156 u16 rx_chain;
1157
1158 /* Tell uCode which antennas are actually connected.
1159 * Before first association, we assume all antennas are connected.
1160 * Just after first association, il4965_chain_noise_calibration()
1161 * checks which antennas actually *are* connected. */
1162 if (il->chain_noise_data.active_chains)
1163 active_chains = il->chain_noise_data.active_chains;
1164 else
1165 active_chains = il->hw_params.valid_rx_ant;
1166
1167 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1168
1169 /* How many receivers should we use? */
1170 active_rx_cnt = il4965_get_active_rx_chain_count(il);
1171 idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
1172
1173	/* correct rx chain count according to hw settings
1174 * and chain noise calibration
1175 */
1176 valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
1177 if (valid_rx_cnt < active_rx_cnt)
1178 active_rx_cnt = valid_rx_cnt;
1179
1180 if (valid_rx_cnt < idle_rx_cnt)
1181 idle_rx_cnt = valid_rx_cnt;
1182
1183 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1184 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1185
1186 il->staging.rx_chain = cpu_to_le16(rx_chain);
1187
1188 if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
1189 il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1190 else
1191 il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1192
1193 D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
1194 active_rx_cnt, idle_rx_cnt);
1195
1196 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1197 active_rx_cnt < idle_rx_cnt);
1198}
1199
1200static const char *
1201il4965_get_fh_string(int cmd)
1202{
1203 switch (cmd) {
1204 IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
1205 IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
1206 IL_CMD(FH49_RSCSR_CHNL0_WPTR);
1207 IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
1208 IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
1209 IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
1210 IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1211 IL_CMD(FH49_TSSR_TX_STATUS_REG);
1212 IL_CMD(FH49_TSSR_TX_ERROR_REG);
1213 default:
1214 return "UNKNOWN";
1215 }
1216}
1217
1218int
1219il4965_dump_fh(struct il_priv *il, char **buf, bool display)
1220{
1221 int i;
1222#ifdef CONFIG_IWLEGACY_DEBUG
1223 int pos = 0;
1224 size_t bufsz = 0;
1225#endif
1226 static const u32 fh_tbl[] = {
1227 FH49_RSCSR_CHNL0_STTS_WPTR_REG,
1228 FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
1229 FH49_RSCSR_CHNL0_WPTR,
1230 FH49_MEM_RCSR_CHNL0_CONFIG_REG,
1231 FH49_MEM_RSSR_SHARED_CTRL_REG,
1232 FH49_MEM_RSSR_RX_STATUS_REG,
1233 FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1234 FH49_TSSR_TX_STATUS_REG,
1235 FH49_TSSR_TX_ERROR_REG
1236 };
1237#ifdef CONFIG_IWLEGACY_DEBUG
1238 if (display) {
1239 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1240 *buf = kmalloc(bufsz, GFP_KERNEL);
1241 if (!*buf)
1242 return -ENOMEM;
1243 pos +=
1244 scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
1245 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1246 pos +=
1247 scnprintf(*buf + pos, bufsz - pos,
1248 " %34s: 0X%08x\n",
1249 il4965_get_fh_string(fh_tbl[i]),
1250 il_rd(il, fh_tbl[i]));
1251 }
1252 return pos;
1253 }
1254#endif
1255 IL_ERR("FH register values:\n");
1256 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1257 IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
1258 il_rd(il, fh_tbl[i]));
1259 }
1260 return 0;
1261}
1262
1263static void
1264il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
1265{
1266 struct il_rx_pkt *pkt = rxb_addr(rxb);
1267 struct il_missed_beacon_notif *missed_beacon;
1268
1269 missed_beacon = &pkt->u.missed_beacon;
1270 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
1271 il->missed_beacon_threshold) {
1272 D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
1273 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
1274 le32_to_cpu(missed_beacon->total_missed_becons),
1275 le32_to_cpu(missed_beacon->num_recvd_beacons),
1276 le32_to_cpu(missed_beacon->num_expected_beacons));
1277 if (!test_bit(S_SCANNING, &il->status))
1278 il4965_init_sensitivity(il);
1279 }
1280}
1281
1282/* Calculate noise level, based on measurements during network silence just
1283 * before the arriving beacon. This measurement can be done only if we know
1284 * exactly when to expect beacons, therefore only when we're associated. */
1285static void
1286il4965_rx_calc_noise(struct il_priv *il)
1287{
1288 struct stats_rx_non_phy *rx_info;
1289 int num_active_rx = 0;
1290 int total_silence = 0;
1291 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
1292 int last_rx_noise;
1293
1294 rx_info = &(il->_4965.stats.rx.general);
1295 bcn_silence_a =
1296 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
1297 bcn_silence_b =
1298 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
1299 bcn_silence_c =
1300 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
1301
1302 if (bcn_silence_a) {
1303 total_silence += bcn_silence_a;
1304 num_active_rx++;
1305 }
1306 if (bcn_silence_b) {
1307 total_silence += bcn_silence_b;
1308 num_active_rx++;
1309 }
1310 if (bcn_silence_c) {
1311 total_silence += bcn_silence_c;
1312 num_active_rx++;
1313 }
1314
1315 /* Average among active antennas */
1316 if (num_active_rx)
1317 last_rx_noise = (total_silence / num_active_rx) - 107;
1318 else
1319 last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
1320
1321 D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
1322 bcn_silence_b, bcn_silence_c, last_rx_noise);
1323}
1324
1325#ifdef CONFIG_IWLEGACY_DEBUGFS
1326/*
1327 * Based on the assumption that all stats counters are DWORDs.
1328 * FIXME: This function is for debugging and does not deal with
1329 * the case of counter roll-over.
1330 */
1331static void
1332il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
1333{
1334 int i, size;
1335 __le32 *prev_stats;
1336 u32 *accum_stats;
1337 u32 *delta, *max_delta;
1338 struct stats_general_common *general, *accum_general;
1339 struct stats_tx *tx, *accum_tx;
1340
1341 prev_stats = (__le32 *) &il->_4965.stats;
1342 accum_stats = (u32 *) &il->_4965.accum_stats;
1343 size = sizeof(struct il_notif_stats);
1344 general = &il->_4965.stats.general.common;
1345 accum_general = &il->_4965.accum_stats.general.common;
1346 tx = &il->_4965.stats.tx;
1347 accum_tx = &il->_4965.accum_stats.tx;
1348 delta = (u32 *) &il->_4965.delta_stats;
1349 max_delta = (u32 *) &il->_4965.max_delta;
1350
1351 for (i = sizeof(__le32); i < size;
1352 i +=
1353 sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
1354 accum_stats++) {
1355 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
1356 *delta =
1357 (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
1358 *accum_stats += *delta;
1359 if (*delta > *max_delta)
1360 *max_delta = *delta;
1361 }
1362 }
1363
1364 /* reset accumulative stats for "no-counter" type stats */
1365 accum_general->temperature = general->temperature;
1366 accum_general->ttl_timestamp = general->ttl_timestamp;
1367}
1368#endif
1369
1370static void
1371il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
1372{
1373 const int recalib_seconds = 60;
1374 bool change;
1375 struct il_rx_pkt *pkt = rxb_addr(rxb);
1376
1377 D_RX("Statistics notification received (%d vs %d).\n",
1378 (int)sizeof(struct il_notif_stats),
1379 le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
1380
1381 change =
1382 ((il->_4965.stats.general.common.temperature !=
1383 pkt->u.stats.general.common.temperature) ||
1384 ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
1385 (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
1386#ifdef CONFIG_IWLEGACY_DEBUGFS
1387 il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
1388#endif
1389
1390	/* TODO: reading some of the stats is unneeded */
1391 memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
1392
1393 set_bit(S_STATS, &il->status);
1394
1395 /*
1396 * Reschedule the stats timer to occur in recalib_seconds to ensure
1397 * we get a thermal update even if the uCode doesn't give us one
1398 */
1399 mod_timer(&il->stats_periodic,
1400 jiffies + msecs_to_jiffies(recalib_seconds * 1000));
1401
1402 if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
1403 (pkt->hdr.cmd == N_STATS)) {
1404 il4965_rx_calc_noise(il);
1405 queue_work(il->workqueue, &il->run_time_calib_work);
1406 }
1407
1408 if (change)
1409 il4965_temperature_calib(il);
1410}
1411
1412static void
1413il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
1414{
1415 struct il_rx_pkt *pkt = rxb_addr(rxb);
1416
1417 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
1418#ifdef CONFIG_IWLEGACY_DEBUGFS
1419 memset(&il->_4965.accum_stats, 0,
1420 sizeof(struct il_notif_stats));
1421 memset(&il->_4965.delta_stats, 0,
1422 sizeof(struct il_notif_stats));
1423 memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
1424#endif
1425 D_RX("Statistics have been cleared\n");
1426 }
1427 il4965_hdl_stats(il, rxb);
1428}
1429
1430
1431/*
1432 * mac80211 queues, ACs, hardware queues, FIFOs.
1433 *
1434 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
1435 *
1436 * Mac80211 uses the following numbers, which we get from it
1437 * by way of skb_get_queue_mapping(skb):
1438 *
1439 * VO 0
1440 * VI 1
1441 * BE 2
1442 * BK 3
1443 *
1444 *
1445 * Regular (not A-MPDU) frames are put into hardware queues corresponding
1446 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
1447 * own queue per aggregation session (RA/TID combination), such queues are
1448 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
1449 * order to map frames to the right queue, we also need an AC->hw queue
1450 * mapping. This is implemented here.
1451 *
1452 * Due to the way hw queues are set up (by the hw specific modules like
1453 * 4965.c), the AC->hw queue mapping is the identity
1454 * mapping.
1455 */
1456
1457static const u8 tid_to_ac[] = {
1458 IEEE80211_AC_BE,
1459 IEEE80211_AC_BK,
1460 IEEE80211_AC_BK,
1461 IEEE80211_AC_BE,
1462 IEEE80211_AC_VI,
1463 IEEE80211_AC_VI,
1464 IEEE80211_AC_VO,
1465 IEEE80211_AC_VO
1466};
1467
1468static inline int
1469il4965_get_ac_from_tid(u16 tid)
1470{
1471 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1472 return tid_to_ac[tid];
1473
1474 /* no support for TIDs 8-15 yet */
1475 return -EINVAL;
1476}
1477
1478static inline int
1479il4965_get_fifo_from_tid(u16 tid)
1480{
1481 const u8 ac_to_fifo[] = {
1482 IL_TX_FIFO_VO,
1483 IL_TX_FIFO_VI,
1484 IL_TX_FIFO_BE,
1485 IL_TX_FIFO_BK,
1486 };
1487
1488 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1489 return ac_to_fifo[tid_to_ac[tid]];
1490
1491 /* no support for TIDs 8-15 yet */
1492 return -EINVAL;
1493}
1494
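A standalone sketch of the two lookups above, showing how a QoS TID resolves first to a mac80211 access category and then to a hardware FIFO. The enum values here are placeholders that only mirror the shape of the driver tables, not the driver's actual constants.

	#include <stdio.h>

	enum { AC_VO, AC_VI, AC_BE, AC_BK };		/* mac80211 AC numbering (VO=0 .. BK=3) */
	enum { FIFO_VO, FIFO_VI, FIFO_BE, FIFO_BK };	/* placeholder FIFO identifiers */

	static const int tid_to_ac[] = { AC_BE, AC_BK, AC_BK, AC_BE,
					 AC_VI, AC_VI, AC_VO, AC_VO };
	static const int ac_to_fifo[] = { FIFO_VO, FIFO_VI, FIFO_BE, FIFO_BK };

	int main(void)
	{
		int tid = 6;	/* a voice TID */

		printf("tid %d -> ac %d -> fifo %d\n",
		       tid, tid_to_ac[tid], ac_to_fifo[tid_to_ac[tid]]);
		return 0;
	}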
1495/*
1496 * handle build C_TX command notification.
1497 */
1498static void
1499il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
1500 struct il_tx_cmd *tx_cmd,
1501 struct ieee80211_tx_info *info,
1502 struct ieee80211_hdr *hdr, u8 std_id)
1503{
1504 __le16 fc = hdr->frame_control;
1505 __le32 tx_flags = tx_cmd->tx_flags;
1506
1507 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1508 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
1509 tx_flags |= TX_CMD_FLG_ACK_MSK;
1510 if (ieee80211_is_mgmt(fc))
1511 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1512 if (ieee80211_is_probe_resp(fc) &&
1513 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
1514 tx_flags |= TX_CMD_FLG_TSF_MSK;
1515 } else {
1516 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
1517 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1518 }
1519
1520 if (ieee80211_is_back_req(fc))
1521 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
1522
1523 tx_cmd->sta_id = std_id;
1524 if (ieee80211_has_morefrags(fc))
1525 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
1526
1527 if (ieee80211_is_data_qos(fc)) {
1528 u8 *qc = ieee80211_get_qos_ctl(hdr);
1529 tx_cmd->tid_tspec = qc[0] & 0xf;
1530 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
1531 } else {
1532 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1533 }
1534
1535 il_tx_cmd_protection(il, info, fc, &tx_flags);
1536
1537 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
1538 if (ieee80211_is_mgmt(fc)) {
1539 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
1540 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
1541 else
1542 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
1543 } else {
1544 tx_cmd->timeout.pm_frame_timeout = 0;
1545 }
1546
1547 tx_cmd->driver_txop = 0;
1548 tx_cmd->tx_flags = tx_flags;
1549 tx_cmd->next_frame_len = 0;
1550}
1551
1552static void
1553il4965_tx_cmd_build_rate(struct il_priv *il,
1554 struct il_tx_cmd *tx_cmd,
1555 struct ieee80211_tx_info *info,
1556 struct ieee80211_sta *sta,
1557 __le16 fc)
1558{
1559 const u8 rts_retry_limit = 60;
1560 u32 rate_flags;
1561 int rate_idx;
1562 u8 data_retry_limit;
1563 u8 rate_plcp;
1564
1565 /* Set retry limit on DATA packets and Probe Responses */
1566 if (ieee80211_is_probe_resp(fc))
1567 data_retry_limit = 3;
1568 else
1569 data_retry_limit = IL4965_DEFAULT_TX_RETRY;
1570 tx_cmd->data_retry_limit = data_retry_limit;
1571 /* Set retry limit on RTS packets */
1572 tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
1573
1574 /* DATA packets will use the uCode station table for rate/antenna
1575 * selection */
1576 if (ieee80211_is_data(fc)) {
1577 tx_cmd->initial_rate_idx = 0;
1578 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
1579 return;
1580 }
1581
1582 /**
1583 * If the current TX rate stored in mac80211 has the MCS bit set, it's
1584 * not really a TX rate. Thus, we use the lowest supported rate for
1585 * this band. Also use the lowest supported rate if the stored rate
1586 * idx is invalid.
1587 */
1588 rate_idx = info->control.rates[0].idx;
1589 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
1590 || rate_idx > RATE_COUNT_LEGACY)
1591 rate_idx = rate_lowest_index(&il->bands[info->band], sta);
1592 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
1593 if (info->band == IEEE80211_BAND_5GHZ)
1594 rate_idx += IL_FIRST_OFDM_RATE;
1595 /* Get PLCP rate for tx_cmd->rate_n_flags */
1596 rate_plcp = il_rates[rate_idx].plcp;
1597 /* Zero out flags for this packet */
1598 rate_flags = 0;
1599
1600 /* Set CCK flag as needed */
1601 if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
1602 rate_flags |= RATE_MCS_CCK_MSK;
1603
1604 /* Set up antennas */
1605 il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
1606 rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
1607
1608 /* Set the rate in the TX cmd */
1609 tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
1610}
1611
1612static void
1613il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
1614 struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
1615 int sta_id)
1616{
1617 struct ieee80211_key_conf *keyconf = info->control.hw_key;
1618
1619 switch (keyconf->cipher) {
1620 case WLAN_CIPHER_SUITE_CCMP:
1621 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
1622 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
1623 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1624 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
1625 D_TX("tx_cmd with AES hwcrypto\n");
1626 break;
1627
1628 case WLAN_CIPHER_SUITE_TKIP:
1629 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
1630 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
1631 D_TX("tx_cmd with tkip hwcrypto\n");
1632 break;
1633
1634 case WLAN_CIPHER_SUITE_WEP104:
1635 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
1636 /* fall through */
1637 case WLAN_CIPHER_SUITE_WEP40:
1638 tx_cmd->sec_ctl |=
1639 (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
1640 TX_CMD_SEC_SHIFT);
1641
1642 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
1643
1644 D_TX("Configuring packet for WEP encryption " "with key %d\n",
1645 keyconf->keyidx);
1646 break;
1647
1648 default:
1649 IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
1650 break;
1651 }
1652}
1653
1654/*
1655 * start C_TX command process
1656 */
1657int
1658il4965_tx_skb(struct il_priv *il,
1659 struct ieee80211_sta *sta,
1660 struct sk_buff *skb)
1661{
1662 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1663 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1664 struct il_station_priv *sta_priv = NULL;
1665 struct il_tx_queue *txq;
1666 struct il_queue *q;
1667 struct il_device_cmd *out_cmd;
1668 struct il_cmd_meta *out_meta;
1669 struct il_tx_cmd *tx_cmd;
1670 int txq_id;
1671 dma_addr_t phys_addr;
1672 dma_addr_t txcmd_phys;
1673 dma_addr_t scratch_phys;
1674 u16 len, firstlen, secondlen;
1675 u16 seq_number = 0;
1676 __le16 fc;
1677 u8 hdr_len;
1678 u8 sta_id;
1679 u8 wait_write_ptr = 0;
1680 u8 tid = 0;
1681 u8 *qc = NULL;
1682 unsigned long flags;
1683 bool is_agg = false;
1684
1685 spin_lock_irqsave(&il->lock, flags);
1686 if (il_is_rfkill(il)) {
1687 D_DROP("Dropping - RF KILL\n");
1688 goto drop_unlock;
1689 }
1690
1691 fc = hdr->frame_control;
1692
1693#ifdef CONFIG_IWLEGACY_DEBUG
1694 if (ieee80211_is_auth(fc))
1695 D_TX("Sending AUTH frame\n");
1696 else if (ieee80211_is_assoc_req(fc))
1697 D_TX("Sending ASSOC frame\n");
1698 else if (ieee80211_is_reassoc_req(fc))
1699 D_TX("Sending REASSOC frame\n");
1700#endif
1701
1702 hdr_len = ieee80211_hdrlen(fc);
1703
1704	/* For management frames use the broadcast id so as not to break aggregation */
1705 if (!ieee80211_is_data(fc))
1706 sta_id = il->hw_params.bcast_id;
1707 else {
1708 /* Find idx into station table for destination station */
1709 sta_id = il_sta_id_or_broadcast(il, sta);
1710
1711 if (sta_id == IL_INVALID_STATION) {
1712 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
1713 goto drop_unlock;
1714 }
1715 }
1716
1717 D_TX("station Id %d\n", sta_id);
1718
1719 if (sta)
1720 sta_priv = (void *)sta->drv_priv;
1721
1722 if (sta_priv && sta_priv->asleep &&
1723 (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
1724 /*
1725 * This sends an asynchronous command to the device,
1726 * but we can rely on it being processed before the
1727 * next frame is processed -- and the next frame to
1728 * this station is the one that will consume this
1729 * counter.
1730 * For now set the counter to just 1 since we do not
1731 * support uAPSD yet.
1732 */
1733 il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
1734 }
1735
1736 /* FIXME: remove me ? */
1737 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1738
1739 /* Access category (AC) is also the queue number */
1740 txq_id = skb_get_queue_mapping(skb);
1741
1742 /* irqs already disabled/saved above when locking il->lock */
1743 spin_lock(&il->sta_lock);
1744
1745 if (ieee80211_is_data_qos(fc)) {
1746 qc = ieee80211_get_qos_ctl(hdr);
1747 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1748 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
1749 spin_unlock(&il->sta_lock);
1750 goto drop_unlock;
1751 }
1752 seq_number = il->stations[sta_id].tid[tid].seq_number;
1753 seq_number &= IEEE80211_SCTL_SEQ;
1754 hdr->seq_ctrl =
1755 hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
1756 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1757 seq_number += 0x10;
1758 /* aggregation is on for this <sta,tid> */
1759 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1760 il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
1761 txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
1762 is_agg = true;
1763 }
1764 }
1765
1766 txq = &il->txq[txq_id];
1767 q = &txq->q;
1768
1769 if (unlikely(il_queue_space(q) < q->high_mark)) {
1770 spin_unlock(&il->sta_lock);
1771 goto drop_unlock;
1772 }
1773
1774 if (ieee80211_is_data_qos(fc)) {
1775 il->stations[sta_id].tid[tid].tfds_in_queue++;
1776 if (!ieee80211_has_morefrags(fc))
1777 il->stations[sta_id].tid[tid].seq_number = seq_number;
1778 }
1779
1780 spin_unlock(&il->sta_lock);
1781
1782 txq->skbs[q->write_ptr] = skb;
1783
1784 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1785 out_cmd = txq->cmd[q->write_ptr];
1786 out_meta = &txq->meta[q->write_ptr];
1787 tx_cmd = &out_cmd->cmd.tx;
1788 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1789 memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
1790
1791 /*
1792 * Set up the Tx-command (not MAC!) header.
1793 * Store the chosen Tx queue and TFD idx within the sequence field;
1794 * after Tx, uCode's Tx response will return this value so driver can
1795 * locate the frame within the tx queue and do post-tx processing.
1796 */
1797 out_cmd->hdr.cmd = C_TX;
1798 out_cmd->hdr.sequence =
1799 cpu_to_le16((u16)
1800 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
1801
1802 /* Copy MAC header from skb into command buffer */
1803 memcpy(tx_cmd->hdr, hdr, hdr_len);
1804
1805 /* Total # bytes to be transmitted */
1806 tx_cmd->len = cpu_to_le16((u16) skb->len);
1807
1808 if (info->control.hw_key)
1809 il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
1810
1811 /* TODO need this for burst mode later on */
1812 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1813
1814 il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
1815
1816 /*
1817 * Use the first empty entry in this queue's command buffer array
1818 * to contain the Tx command and MAC header concatenated together
1819 * (payload data will be in another buffer).
1820 * Size of this varies, due to varying MAC header length.
1821 * If end is not dword aligned, we'll have 2 extra bytes at the end
1822 * of the MAC header (device reads on dword boundaries).
1823 * We'll tell device about this padding later.
1824 */
1825 len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
1826 firstlen = (len + 3) & ~3;
1827
1828 /* Tell NIC about any 2-byte padding after MAC header */
1829 if (firstlen != len)
1830 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1831
1832 /* Physical address of this Tx command's header (not MAC header!),
1833 * within command buffer array. */
1834 txcmd_phys =
1835 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
1836 PCI_DMA_BIDIRECTIONAL);
1837 if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
1838 goto drop_unlock;
1839
1840 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1841 * if any (802.11 null frames have no payload). */
1842 secondlen = skb->len - hdr_len;
1843 if (secondlen > 0) {
1844 phys_addr =
1845 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
1846 PCI_DMA_TODEVICE);
1847 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
1848 goto drop_unlock;
1849 }
1850
1851 /* Add buffer containing Tx command and MAC(!) header to TFD's
1852 * first entry */
1853 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
1854 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1855 dma_unmap_len_set(out_meta, len, firstlen);
1856 if (secondlen)
1857 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
1858 0, 0);
1859
1860 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1861 txq->need_update = 1;
1862 } else {
1863 wait_write_ptr = 1;
1864 txq->need_update = 0;
1865 }
1866
1867 scratch_phys =
1868 txcmd_phys + sizeof(struct il_cmd_header) +
1869 offsetof(struct il_tx_cmd, scratch);
1870
1871 /* take back ownership of DMA buffer to enable update */
1872 pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
1873 PCI_DMA_BIDIRECTIONAL);
1874 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1875 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
1876
1877 il_update_stats(il, true, fc, skb->len);
1878
1879 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
1880 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1881 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
1882 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
1883
1884 /* Set up entry for this TFD in Tx byte-count array */
1885 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1886 il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
1887
1888 pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
1889 PCI_DMA_BIDIRECTIONAL);
1890
1891 /* Tell device the write idx *just past* this latest filled TFD */
1892 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
1893 il_txq_update_write_ptr(il, txq);
1894 spin_unlock_irqrestore(&il->lock, flags);
1895
1896 /*
1897 * At this point the frame is "transmitted" successfully
1898 * and we will get a TX status notification eventually,
1899 * regardless of the value of ret. "ret" only indicates
1900 * whether or not we should update the write pointer.
1901 */
1902
1903 /*
1904 * Avoid atomic ops if it isn't an associated client.
1905 * Also, if this is a packet for aggregation, don't
1906 * increase the counter because the ucode will stop
1907 * aggregation queues when their respective station
1908 * goes to sleep.
1909 */
1910 if (sta_priv && sta_priv->client && !is_agg)
1911 atomic_inc(&sta_priv->pending_frames);
1912
1913 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
1914 if (wait_write_ptr) {
1915 spin_lock_irqsave(&il->lock, flags);
1916 txq->need_update = 1;
1917 il_txq_update_write_ptr(il, txq);
1918 spin_unlock_irqrestore(&il->lock, flags);
1919 } else {
1920 il_stop_queue(il, txq);
1921 }
1922 }
1923
1924 return 0;
1925
1926drop_unlock:
1927 spin_unlock_irqrestore(&il->lock, flags);
1928 return -1;
1929}
1930
1931static inline int
1932il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1933{
1934 ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
1935 GFP_KERNEL);
1936 if (!ptr->addr)
1937 return -ENOMEM;
1938 ptr->size = size;
1939 return 0;
1940}
1941
1942static inline void
1943il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
1944{
1945 if (unlikely(!ptr->addr))
1946 return;
1947
1948 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1949 memset(ptr, 0, sizeof(*ptr));
1950}
1951
1952/**
1953 * il4965_hw_txq_ctx_free - Free TXQ Context
1954 *
1955 * Destroy all TX DMA queues and structures
1956 */
1957void
1958il4965_hw_txq_ctx_free(struct il_priv *il)
1959{
1960 int txq_id;
1961
1962 /* Tx queues */
1963 if (il->txq) {
1964 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1965 if (txq_id == il->cmd_queue)
1966 il_cmd_queue_free(il);
1967 else
1968 il_tx_queue_free(il, txq_id);
1969 }
1970 il4965_free_dma_ptr(il, &il->kw);
1971
1972 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1973
1974 /* free tx queue structure */
1975 il_free_txq_mem(il);
1976}
1977
1978/**
1979 * il4965_txq_ctx_alloc - allocate TX queue context
1980 * Allocate all Tx DMA structures and initialize them
1981 *
1982 * @param il
1983 * @return error code
1984 */
1985int
1986il4965_txq_ctx_alloc(struct il_priv *il)
1987{
1988 int ret, txq_id;
1989 unsigned long flags;
1990
1991 /* Free all tx/cmd queues and keep-warm buffer */
1992 il4965_hw_txq_ctx_free(il);
1993
1994 ret =
1995 il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
1996 il->hw_params.scd_bc_tbls_size);
1997 if (ret) {
1998 IL_ERR("Scheduler BC Table allocation failed\n");
1999 goto error_bc_tbls;
2000 }
2001 /* Alloc keep-warm buffer */
2002 ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
2003 if (ret) {
2004 IL_ERR("Keep Warm allocation failed\n");
2005 goto error_kw;
2006 }
2007
2008 /* allocate tx queue structure */
2009 ret = il_alloc_txq_mem(il);
2010 if (ret)
2011 goto error;
2012
2013 spin_lock_irqsave(&il->lock, flags);
2014
2015 /* Turn off all Tx DMA fifos */
2016 il4965_txq_set_sched(il, 0);
2017
2018 /* Tell NIC where to find the "keep warm" buffer */
2019 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2020
2021 spin_unlock_irqrestore(&il->lock, flags);
2022
2023 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
2024 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2025 ret = il_tx_queue_init(il, txq_id);
2026 if (ret) {
2027 IL_ERR("Tx %d queue init failed\n", txq_id);
2028 goto error;
2029 }
2030 }
2031
2032 return ret;
2033
2034error:
2035 il4965_hw_txq_ctx_free(il);
2036 il4965_free_dma_ptr(il, &il->kw);
2037error_kw:
2038 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
2039error_bc_tbls:
2040 return ret;
2041}
2042
2043void
2044il4965_txq_ctx_reset(struct il_priv *il)
2045{
2046 int txq_id;
2047 unsigned long flags;
2048
2049 spin_lock_irqsave(&il->lock, flags);
2050
2051 /* Turn off all Tx DMA fifos */
2052 il4965_txq_set_sched(il, 0);
2053 /* Tell NIC where to find the "keep warm" buffer */
2054 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2055
2056 spin_unlock_irqrestore(&il->lock, flags);
2057
2058 /* Alloc and init all Tx queues, including the command queue (#4) */
2059 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2060 il_tx_queue_reset(il, txq_id);
2061}
2062
2063static void
2064il4965_txq_ctx_unmap(struct il_priv *il)
2065{
2066 int txq_id;
2067
2068 if (!il->txq)
2069 return;
2070
2071 /* Unmap DMA from host system and free skb's */
2072 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2073 if (txq_id == il->cmd_queue)
2074 il_cmd_queue_unmap(il);
2075 else
2076 il_tx_queue_unmap(il, txq_id);
2077}
2078
2079/**
2080 * il4965_txq_ctx_stop - Stop all Tx DMA channels
2081 */
2082void
2083il4965_txq_ctx_stop(struct il_priv *il)
2084{
2085 int ch, ret;
2086
2087 _il_wr_prph(il, IL49_SCD_TXFACT, 0);
2088
2089 /* Stop each Tx DMA channel, and wait for it to be idle */
2090 for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
2091 _il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
2092 ret =
2093 _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
2094 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
2095 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
2096 1000);
2097 if (ret < 0)
2098 IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
2099 ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
2100 }
2101}
2102
2103/*
2104 * Find first available (lowest unused) Tx Queue, mark it "active".
2105 * Called only when finding queue for aggregation.
2106 * Should never return anything < 7, because they should already
2107 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
2108 */
2109static int
2110il4965_txq_ctx_activate_free(struct il_priv *il)
2111{
2112 int txq_id;
2113
2114 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2115 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2116 return txq_id;
2117 return -1;
2118}
2119
2120/**
2121 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
2122 */
2123static void
2124il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
2125{
2126 /* Simply stop the queue, but don't change any configuration;
2127 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
2128 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
2129 (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2130 (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2131}
2132
2133/**
2134 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
2135 */
2136static int
2137il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
2138{
2139 u32 tbl_dw_addr;
2140 u32 tbl_dw;
2141 u16 scd_q2ratid;
2142
2143 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2144
2145 tbl_dw_addr =
2146 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2147
2148 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2149
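	/* Each translation-table dword holds the RA/TID mapping for two queues:
	 * odd-numbered queues use the upper 16 bits, even-numbered the lower. */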
2150 if (txq_id & 0x1)
2151 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2152 else
2153 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2154
2155 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2156
2157 return 0;
2158}
2159
2160/**
2161 * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
2162 *
2163 * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
2164 * i.e. it must be one of the higher queues used for aggregation
2165 */
2166static int
2167il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
2168 int tid, u16 ssn_idx)
2169{
2170 unsigned long flags;
2171 u16 ra_tid;
2172 int ret;
2173
2174 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2175 (IL49_FIRST_AMPDU_QUEUE +
2176 il->cfg->num_of_ampdu_queues <= txq_id)) {
2177 IL_WARN("queue number out of range: %d, must be %d to %d\n",
2178 txq_id, IL49_FIRST_AMPDU_QUEUE,
2179 IL49_FIRST_AMPDU_QUEUE +
2180 il->cfg->num_of_ampdu_queues - 1);
2181 return -EINVAL;
2182 }
2183
2184 ra_tid = BUILD_RAxTID(sta_id, tid);
2185
2186 /* Modify device's station table to Tx this TID */
2187 ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
2188 if (ret)
2189 return ret;
2190
2191 spin_lock_irqsave(&il->lock, flags);
2192
2193 /* Stop this Tx queue before configuring it */
2194 il4965_tx_queue_stop_scheduler(il, txq_id);
2195
2196 /* Map receiver-address / traffic-ID to this queue */
2197 il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
2198
2199 /* Set this queue as a chain-building queue */
2200 il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2201
2202 /* Place first TFD at idx corresponding to start sequence number.
2203 * Assumes that ssn_idx is valid (!= 0xFFF) */
2204 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2205 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2206 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2207
2208 /* Set up Tx win size and frame limit for this queue */
2209 il_write_targ_mem(il,
2210 il->scd_base_addr +
2211 IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
2212 (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
2213 & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
2214
2215 il_write_targ_mem(il,
2216 il->scd_base_addr +
2217 IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
2218 (SCD_FRAME_LIMIT <<
2219 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2220 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
2221
2222 il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2223
2224 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
2225 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
2226
2227 spin_unlock_irqrestore(&il->lock, flags);
2228
2229 return 0;
2230}
2231
2232int
2233il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
2234 struct ieee80211_sta *sta, u16 tid, u16 * ssn)
2235{
2236 int sta_id;
2237 int tx_fifo;
2238 int txq_id;
2239 int ret;
2240 unsigned long flags;
2241 struct il_tid_data *tid_data;
2242
2243 /* FIXME: warning if tx fifo not found ? */
2244 tx_fifo = il4965_get_fifo_from_tid(tid);
2245 if (unlikely(tx_fifo < 0))
2246 return tx_fifo;
2247
2248 D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);
2249
2250 sta_id = il_sta_id(sta);
2251 if (sta_id == IL_INVALID_STATION) {
2252 IL_ERR("Start AGG on invalid station\n");
2253 return -ENXIO;
2254 }
2255 if (unlikely(tid >= MAX_TID_COUNT))
2256 return -EINVAL;
2257
2258 if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
2259 IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
2260 return -ENXIO;
2261 }
2262
2263 txq_id = il4965_txq_ctx_activate_free(il);
2264 if (txq_id == -1) {
2265 IL_ERR("No free aggregation queue available\n");
2266 return -ENXIO;
2267 }
2268
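	/* Record the chosen HW queue and the SSN this BA session will start at
	 * before enabling aggregation on that queue. */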
2269 spin_lock_irqsave(&il->sta_lock, flags);
2270 tid_data = &il->stations[sta_id].tid[tid];
2271 *ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2272 tid_data->agg.txq_id = txq_id;
2273 il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
2274 spin_unlock_irqrestore(&il->sta_lock, flags);
2275
2276 ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
2277 if (ret)
2278 return ret;
2279
2280 spin_lock_irqsave(&il->sta_lock, flags);
2281 tid_data = &il->stations[sta_id].tid[tid];
2282 if (tid_data->tfds_in_queue == 0) {
2283 D_HT("HW queue is empty\n");
2284 tid_data->agg.state = IL_AGG_ON;
2285 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2286 } else {
2287 D_HT("HW queue is NOT empty: %d packets in HW queue\n",
2288 tid_data->tfds_in_queue);
2289 tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
2290 }
2291 spin_unlock_irqrestore(&il->sta_lock, flags);
2292 return ret;
2293}
2294
2295/**
2296 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
2297 * il->lock must be held by the caller
2298 */
2299static int
2300il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
2301{
2302 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2303 (IL49_FIRST_AMPDU_QUEUE +
2304 il->cfg->num_of_ampdu_queues <= txq_id)) {
2305 IL_WARN("queue number out of range: %d, must be %d to %d\n",
2306 txq_id, IL49_FIRST_AMPDU_QUEUE,
2307 IL49_FIRST_AMPDU_QUEUE +
2308 il->cfg->num_of_ampdu_queues - 1);
2309 return -EINVAL;
2310 }
2311
2312 il4965_tx_queue_stop_scheduler(il, txq_id);
2313
2314 il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2315
2316 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2317 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2318 	/* assumes that ssn_idx is valid (!= 0xFFF) */
2319 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2320
2321 il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2322 il_txq_ctx_deactivate(il, txq_id);
2323 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
2324
2325 return 0;
2326}
2327
2328int
2329il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
2330 struct ieee80211_sta *sta, u16 tid)
2331{
2332 int tx_fifo_id, txq_id, sta_id, ssn;
2333 struct il_tid_data *tid_data;
2334 int write_ptr, read_ptr;
2335 unsigned long flags;
2336
2337 /* FIXME: warning if tx_fifo_id not found ? */
2338 tx_fifo_id = il4965_get_fifo_from_tid(tid);
2339 if (unlikely(tx_fifo_id < 0))
2340 return tx_fifo_id;
2341
2342 sta_id = il_sta_id(sta);
2343
2344 if (sta_id == IL_INVALID_STATION) {
2345 IL_ERR("Invalid station for AGG tid %d\n", tid);
2346 return -ENXIO;
2347 }
2348
2349 spin_lock_irqsave(&il->sta_lock, flags);
2350
2351 tid_data = &il->stations[sta_id].tid[tid];
2352 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
2353 txq_id = tid_data->agg.txq_id;
2354
2355 switch (il->stations[sta_id].tid[tid].agg.state) {
2356 case IL_EMPTYING_HW_QUEUE_ADDBA:
2357 /*
2358 * This can happen if the peer stops aggregation
2359 * again before we've had a chance to drain the
2360 * queue we selected previously, i.e. before the
2361 		 * session had fully started.
2362 */
2363 D_HT("AGG stop before setup done\n");
2364 goto turn_off;
2365 case IL_AGG_ON:
2366 break;
2367 default:
2368 IL_WARN("Stopping AGG while state not ON or starting\n");
2369 }
2370
2371 write_ptr = il->txq[txq_id].q.write_ptr;
2372 read_ptr = il->txq[txq_id].q.read_ptr;
2373
2374 /* The queue is not empty */
2375 if (write_ptr != read_ptr) {
2376 D_HT("Stopping a non empty AGG HW QUEUE\n");
2377 il->stations[sta_id].tid[tid].agg.state =
2378 IL_EMPTYING_HW_QUEUE_DELBA;
2379 spin_unlock_irqrestore(&il->sta_lock, flags);
2380 return 0;
2381 }
2382
2383 D_HT("HW queue is empty\n");
2384turn_off:
2385 il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
2386
2387 /* do not restore/save irqs */
2388 spin_unlock(&il->sta_lock);
2389 spin_lock(&il->lock);
2390
2391 /*
2392 	 * The only reason this call can fail is a queue number out of range,
2393 	 * which can happen if uCode is reloaded and all the station
2394 	 * information is lost. If it is outside the range, there is no need
2395 	 * to deactivate the uCode queue; just return "success" to allow
2396 	 * mac80211 to clean up its own data.
2397 */
2398 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
2399 spin_unlock_irqrestore(&il->lock, flags);
2400
2401 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2402
2403 return 0;
2404}
2405
2406int
2407il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
2408{
2409 struct il_queue *q = &il->txq[txq_id].q;
2410 u8 *addr = il->stations[sta_id].sta.sta.addr;
2411 struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
2412
2413 lockdep_assert_held(&il->sta_lock);
2414
2415 switch (il->stations[sta_id].tid[tid].agg.state) {
2416 case IL_EMPTYING_HW_QUEUE_DELBA:
2417 /* We are reclaiming the last packet of the */
2418 /* aggregated HW queue */
2419 if (txq_id == tid_data->agg.txq_id &&
2420 q->read_ptr == q->write_ptr) {
2421 u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2422 int tx_fifo = il4965_get_fifo_from_tid(tid);
2423 D_HT("HW queue empty: continue DELBA flow\n");
2424 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
2425 tid_data->agg.state = IL_AGG_OFF;
2426 ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
2427 }
2428 break;
2429 case IL_EMPTYING_HW_QUEUE_ADDBA:
2430 /* We are reclaiming the last packet of the queue */
2431 if (tid_data->tfds_in_queue == 0) {
2432 D_HT("HW queue empty: continue ADDBA flow\n");
2433 tid_data->agg.state = IL_AGG_ON;
2434 ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
2435 }
2436 break;
2437 }
2438
2439 return 0;
2440}
2441
2442static void
2443il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
2444{
2445 struct ieee80211_sta *sta;
2446 struct il_station_priv *sta_priv;
2447
2448 rcu_read_lock();
2449 sta = ieee80211_find_sta(il->vif, addr1);
2450 if (sta) {
2451 sta_priv = (void *)sta->drv_priv;
2452 /* avoid atomic ops if this isn't a client */
2453 if (sta_priv->client &&
2454 atomic_dec_return(&sta_priv->pending_frames) == 0)
2455 ieee80211_sta_block_awake(il->hw, sta, false);
2456 }
2457 rcu_read_unlock();
2458}
2459
2460static void
2461il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
2462{
2463 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2464
2465 if (!is_agg)
2466 il4965_non_agg_tx_status(il, hdr->addr1);
2467
2468 ieee80211_tx_status_irqsafe(il->hw, skb);
2469}
2470
2471int
2472il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
2473{
2474 struct il_tx_queue *txq = &il->txq[txq_id];
2475 struct il_queue *q = &txq->q;
2476 int nfreed = 0;
2477 struct ieee80211_hdr *hdr;
2478 struct sk_buff *skb;
2479
2480 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
2481 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
2482 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
2483 q->write_ptr, q->read_ptr);
2484 return 0;
2485 }
2486
2487 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
2488 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
2489
2490 skb = txq->skbs[txq->q.read_ptr];
2491
2492 if (WARN_ON_ONCE(skb == NULL))
2493 continue;
2494
2495 hdr = (struct ieee80211_hdr *) skb->data;
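		/* Only QoS data frames count towards the caller's per-TID
		 * tfds_in_queue accounting. */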
2496 if (ieee80211_is_data_qos(hdr->frame_control))
2497 nfreed++;
2498
2499 il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);
2500
2501 txq->skbs[txq->q.read_ptr] = NULL;
2502 il->ops->txq_free_tfd(il, txq);
2503 }
2504 return nfreed;
2505}
2506
2507/**
2508 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
2509 *
2510 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
2511 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
2512 */
2513static int
2514il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
2515 struct il_compressed_ba_resp *ba_resp)
2516{
2517 int i, sh, ack;
2518 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
2519 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2520 int successes = 0;
2521 struct ieee80211_tx_info *info;
2522 u64 bitmap, sent_bitmap;
2523
2524 if (unlikely(!agg->wait_for_ba)) {
2525 if (unlikely(ba_resp->bitmap))
2526 IL_ERR("Received BA when not expected\n");
2527 return -EINVAL;
2528 }
2529
2530 /* Mark that the expected block-ack response arrived */
2531 agg->wait_for_ba = 0;
2532 D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
2533
2534 /* Calculate shift to align block-ack bits with our Tx win bits */
2535 sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
2536 	if (sh < 0)		/* something is wrong with the indices */
2537 sh += 0x100;
2538
2539 if (agg->frame_count > (64 - sh)) {
2540 D_TX_REPLY("more frames than bitmap size");
2541 return -1;
2542 }
2543
2544 /* don't use 64-bit values for now */
2545 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
2546
2547 /* check for success or failure according to the
2548 * transmitted bitmap and block-ack bitmap */
2549 sent_bitmap = bitmap & agg->bitmap;
2550
2551 /* For each frame attempted in aggregation,
2552 * update driver's record of tx frame's status. */
2553 i = 0;
2554 while (sent_bitmap) {
2555 ack = sent_bitmap & 1ULL;
2556 successes += ack;
2557 D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
2558 i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
2559 sent_bitmap >>= 1;
2560 ++i;
2561 }
2562
2563 D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
2564
2565 info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
2566 memset(&info->status, 0, sizeof(info->status));
2567 info->flags |= IEEE80211_TX_STAT_ACK;
2568 info->flags |= IEEE80211_TX_STAT_AMPDU;
2569 info->status.ampdu_ack_len = successes;
2570 info->status.ampdu_len = agg->frame_count;
2571 il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
2572
2573 return 0;
2574}
2575
2576static inline bool
2577il4965_is_tx_success(u32 status)
2578{
2579 status &= TX_STATUS_MSK;
2580 return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
2581}
2582
2583static u8
2584il4965_find_station(struct il_priv *il, const u8 *addr)
2585{
2586 int i;
2587 int start = 0;
2588 int ret = IL_INVALID_STATION;
2589 unsigned long flags;
2590
2591 if (il->iw_mode == NL80211_IFTYPE_ADHOC)
2592 start = IL_STA_ID;
2593
2594 if (is_broadcast_ether_addr(addr))
2595 return il->hw_params.bcast_id;
2596
2597 spin_lock_irqsave(&il->sta_lock, flags);
2598 for (i = start; i < il->hw_params.max_stations; i++)
2599 if (il->stations[i].used &&
2600 ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
2601 ret = i;
2602 goto out;
2603 }
2604
2605 D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
2606
2607out:
2608 /*
2609 	 * It is possible that more commands interacting with stations
2610 	 * arrive before we have completed processing the addition of a
2611 	 * station
2612 */
2613 if (ret != IL_INVALID_STATION &&
2614 (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
2615 ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
2616 (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
2617 IL_ERR("Requested station info for sta %d before ready.\n",
2618 ret);
2619 ret = IL_INVALID_STATION;
2620 }
2621 spin_unlock_irqrestore(&il->sta_lock, flags);
2622 return ret;
2623}
2624
2625static int
2626il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2627{
2628 if (il->iw_mode == NL80211_IFTYPE_STATION)
2629 return IL_AP_ID;
2630 else {
2631 u8 *da = ieee80211_get_DA(hdr);
2632
2633 return il4965_find_station(il, da);
2634 }
2635}
2636
2637static inline u32
2638il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
2639{
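	/* The scheduler SSN is the 32-bit word that follows the frame_count
	 * per-frame status entries in the Tx response. */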
2640 return le32_to_cpup(&tx_resp->u.status +
2641 tx_resp->frame_count) & IEEE80211_MAX_SN;
2642}
2643
2644static inline u32
2645il4965_tx_status_to_mac80211(u32 status)
2646{
2647 status &= TX_STATUS_MSK;
2648
2649 switch (status) {
2650 case TX_STATUS_SUCCESS:
2651 case TX_STATUS_DIRECT_DONE:
2652 return IEEE80211_TX_STAT_ACK;
2653 case TX_STATUS_FAIL_DEST_PS:
2654 return IEEE80211_TX_STAT_TX_FILTERED;
2655 default:
2656 return 0;
2657 }
2658}
2659
2660/**
2661 * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
2662 */
2663static int
2664il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
2665 struct il4965_tx_resp *tx_resp, int txq_id,
2666 u16 start_idx)
2667{
2668 u16 status;
2669 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
2670 struct ieee80211_tx_info *info = NULL;
2671 struct ieee80211_hdr *hdr = NULL;
2672 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
2673 int i, sh, idx;
2674 u16 seq;
2675 if (agg->wait_for_ba)
2676 D_TX_REPLY("got tx response w/o block-ack\n");
2677
2678 agg->frame_count = tx_resp->frame_count;
2679 agg->start_idx = start_idx;
2680 agg->rate_n_flags = rate_n_flags;
2681 agg->bitmap = 0;
2682
2683 /* num frames attempted by Tx command */
2684 if (agg->frame_count == 1) {
2685 /* Only one frame was attempted; no block-ack will arrive */
2686 status = le16_to_cpu(frame_status[0].status);
2687 idx = start_idx;
2688
2689 D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
2690 agg->frame_count, agg->start_idx, idx);
2691
2692 info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
2693 info->status.rates[0].count = tx_resp->failure_frame + 1;
2694 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
2695 info->flags |= il4965_tx_status_to_mac80211(status);
2696 il4965_hwrate_to_tx_control(il, rate_n_flags, info);
2697
2698 D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
2699 tx_resp->failure_frame);
2700 D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
2701
2702 agg->wait_for_ba = 0;
2703 } else {
2704 /* Two or more frames were attempted; expect block-ack */
2705 u64 bitmap = 0;
2706 int start = agg->start_idx;
2707 struct sk_buff *skb;
2708
2709 /* Construct bit-map of pending frames within Tx win */
2710 for (i = 0; i < agg->frame_count; i++) {
2711 u16 sc;
2712 status = le16_to_cpu(frame_status[i].status);
2713 seq = le16_to_cpu(frame_status[i].sequence);
2714 idx = SEQ_TO_IDX(seq);
2715 txq_id = SEQ_TO_QUEUE(seq);
2716
2717 if (status &
2718 (AGG_TX_STATE_FEW_BYTES_MSK |
2719 AGG_TX_STATE_ABORT_MSK))
2720 continue;
2721
2722 D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
2723 agg->frame_count, txq_id, idx);
2724
2725 skb = il->txq[txq_id].skbs[idx];
2726 if (WARN_ON_ONCE(skb == NULL))
2727 return -1;
2728 hdr = (struct ieee80211_hdr *) skb->data;
2729
2730 sc = le16_to_cpu(hdr->seq_ctrl);
2731 if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
2732 IL_ERR("BUG_ON idx doesn't match seq control"
2733 " idx=%d, seq_idx=%d, seq=%d\n", idx,
2734 IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
2735 return -1;
2736 }
2737
2738 D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
2739 IEEE80211_SEQ_TO_SN(sc));
2740
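			/* Offset of this frame from the window start; the 0xff
			 * corrections handle sequence-number wraparound, and a
			 * negative offset moves the window start back to idx. */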
2741 sh = idx - start;
2742 if (sh > 64) {
2743 sh = (start - idx) + 0xff;
2744 bitmap = bitmap << sh;
2745 sh = 0;
2746 start = idx;
2747 } else if (sh < -64)
2748 sh = 0xff - (start - idx);
2749 else if (sh < 0) {
2750 sh = start - idx;
2751 start = idx;
2752 bitmap = bitmap << sh;
2753 sh = 0;
2754 }
2755 bitmap |= 1ULL << sh;
2756 D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
2757 (unsigned long long)bitmap);
2758 }
2759
2760 agg->bitmap = bitmap;
2761 agg->start_idx = start;
2762 D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
2763 agg->frame_count, agg->start_idx,
2764 (unsigned long long)agg->bitmap);
2765
2766 if (bitmap)
2767 agg->wait_for_ba = 1;
2768 }
2769 return 0;
2770}
2771
2772/**
2773 * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
2774 */
2775static void
2776il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
2777{
2778 struct il_rx_pkt *pkt = rxb_addr(rxb);
2779 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2780 int txq_id = SEQ_TO_QUEUE(sequence);
2781 int idx = SEQ_TO_IDX(sequence);
2782 struct il_tx_queue *txq = &il->txq[txq_id];
2783 struct sk_buff *skb;
2784 struct ieee80211_hdr *hdr;
2785 struct ieee80211_tx_info *info;
2786 struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
2787 u32 status = le32_to_cpu(tx_resp->u.status);
2788 int uninitialized_var(tid);
2789 int sta_id;
2790 int freed;
2791 u8 *qc = NULL;
2792 unsigned long flags;
2793
2794 if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
2795 IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
2796 "is out of range [0-%d] %d %d\n", txq_id, idx,
2797 txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
2798 return;
2799 }
2800
2801 txq->time_stamp = jiffies;
2802
2803 skb = txq->skbs[txq->q.read_ptr];
2804 info = IEEE80211_SKB_CB(skb);
2805 memset(&info->status, 0, sizeof(info->status));
2806
2807 hdr = (struct ieee80211_hdr *) skb->data;
2808 if (ieee80211_is_data_qos(hdr->frame_control)) {
2809 qc = ieee80211_get_qos_ctl(hdr);
2810 tid = qc[0] & 0xf;
2811 }
2812
2813 sta_id = il4965_get_ra_sta_id(il, hdr);
2814 if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
2815 IL_ERR("Station not known\n");
2816 return;
2817 }
2818
2819 /*
2820 	 * Firmware will not transmit a frame on a passive channel if it has
2821 	 * not yet received a valid frame on that channel. When this error
2822 	 * happens we have to wait until the firmware unblocks itself, i.e.
2823 	 * when we receive a beacon or another frame. We unblock queues in
2824 * il4965_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
2825 */
2826 if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
2827 il->iw_mode == NL80211_IFTYPE_STATION) {
2828 il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
2829 D_INFO("Stopped queues - RX waiting on passive channel\n");
2830 }
2831
2832 spin_lock_irqsave(&il->sta_lock, flags);
2833 if (txq->sched_retry) {
2834 const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
2835 struct il_ht_agg *agg = NULL;
2836 WARN_ON(!qc);
2837
2838 agg = &il->stations[sta_id].tid[tid].agg;
2839
2840 il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
2841
2842 /* check if BAR is needed */
2843 if (tx_resp->frame_count == 1 &&
2844 !il4965_is_tx_success(status))
2845 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
2846
2847 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2848 idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2849 D_TX_REPLY("Retry scheduler reclaim scd_ssn "
2850 "%d idx %d\n", scd_ssn, idx);
2851 freed = il4965_tx_queue_reclaim(il, txq_id, idx);
2852 if (qc)
2853 il4965_free_tfds_in_queue(il, sta_id, tid,
2854 freed);
2855
2856 if (il->mac80211_registered &&
2857 il_queue_space(&txq->q) > txq->q.low_mark &&
2858 agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
2859 il_wake_queue(il, txq);
2860 }
2861 } else {
2862 info->status.rates[0].count = tx_resp->failure_frame + 1;
2863 info->flags |= il4965_tx_status_to_mac80211(status);
2864 il4965_hwrate_to_tx_control(il,
2865 le32_to_cpu(tx_resp->rate_n_flags),
2866 info);
2867
2868 D_TX_REPLY("TXQ %d status %s (0x%08x) "
2869 "rate_n_flags 0x%x retries %d\n", txq_id,
2870 il4965_get_tx_fail_reason(status), status,
2871 le32_to_cpu(tx_resp->rate_n_flags),
2872 tx_resp->failure_frame);
2873
2874 freed = il4965_tx_queue_reclaim(il, txq_id, idx);
2875 if (qc && likely(sta_id != IL_INVALID_STATION))
2876 il4965_free_tfds_in_queue(il, sta_id, tid, freed);
2877 else if (sta_id == IL_INVALID_STATION)
2878 D_TX_REPLY("Station not known\n");
2879
2880 if (il->mac80211_registered &&
2881 il_queue_space(&txq->q) > txq->q.low_mark)
2882 il_wake_queue(il, txq);
2883 }
2884 if (qc && likely(sta_id != IL_INVALID_STATION))
2885 il4965_txq_check_empty(il, sta_id, tid, txq_id);
2886
2887 il4965_check_abort_status(il, tx_resp->frame_count, status);
2888
2889 spin_unlock_irqrestore(&il->sta_lock, flags);
2890}
2891
2892/**
2893 * translate ucode response to mac80211 tx status control values
2894 */
2895void
2896il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2897 struct ieee80211_tx_info *info)
2898{
2899 struct ieee80211_tx_rate *r = &info->status.rates[0];
2900
2901 info->status.antenna =
2902 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2903 if (rate_n_flags & RATE_MCS_HT_MSK)
2904 r->flags |= IEEE80211_TX_RC_MCS;
2905 if (rate_n_flags & RATE_MCS_GF_MSK)
2906 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2907 if (rate_n_flags & RATE_MCS_HT40_MSK)
2908 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2909 if (rate_n_flags & RATE_MCS_DUP_MSK)
2910 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2911 if (rate_n_flags & RATE_MCS_SGI_MSK)
2912 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2913 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2914}
2915
2916/**
2917 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
2918 *
2919 * Handles block-acknowledge notification from device, which reports success
2920 * of frames sent via aggregation.
2921 */
2922static void
2923il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
2924{
2925 struct il_rx_pkt *pkt = rxb_addr(rxb);
2926 struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
2927 struct il_tx_queue *txq = NULL;
2928 struct il_ht_agg *agg;
2929 int idx;
2930 int sta_id;
2931 int tid;
2932 unsigned long flags;
2933
2934 /* "flow" corresponds to Tx queue */
2935 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2936
2937 /* "ssn" is start of block-ack Tx win, corresponds to idx
2938 * (in Tx queue's circular buffer) of first TFD/frame in win */
2939 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
2940
2941 if (scd_flow >= il->hw_params.max_txq_num) {
2942 IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
2943 return;
2944 }
2945
2946 txq = &il->txq[scd_flow];
2947 sta_id = ba_resp->sta_id;
2948 tid = ba_resp->tid;
2949 agg = &il->stations[sta_id].tid[tid].agg;
2950 if (unlikely(agg->txq_id != scd_flow)) {
2951 /*
2952 		 * FIXME: this is a uCode bug which needs to be addressed;
2953 		 * log the information and return for now.
2954 		 * Since it can happen very often, and in order
2955 		 * not to fill the syslog, don't enable the logging by default.
2956 */
2957 D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
2958 scd_flow, agg->txq_id);
2959 return;
2960 }
2961
2962 /* Find idx just before block-ack win */
2963 idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
2964
2965 spin_lock_irqsave(&il->sta_lock, flags);
2966
2967 D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
2968 agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
2969 ba_resp->sta_id);
2970 D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
2971 "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
2972 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
2973 ba_resp->scd_flow, ba_resp->scd_ssn);
2974 D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
2975 (unsigned long long)agg->bitmap);
2976
2977 /* Update driver's record of ACK vs. not for each frame in win */
2978 il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
2979
2980 /* Release all TFDs before the SSN, i.e. all TFDs in front of
2981 * block-ack win (we assume that they've been successfully
2982 * transmitted ... if not, it's too late anyway). */
2983 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
2984 /* calculate mac80211 ampdu sw queue to wake */
2985 int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
2986 il4965_free_tfds_in_queue(il, sta_id, tid, freed);
2987
2988 if (il_queue_space(&txq->q) > txq->q.low_mark &&
2989 il->mac80211_registered &&
2990 agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
2991 il_wake_queue(il, txq);
2992
2993 il4965_txq_check_empty(il, sta_id, tid, scd_flow);
2994 }
2995
2996 spin_unlock_irqrestore(&il->sta_lock, flags);
2997}
2998
2999#ifdef CONFIG_IWLEGACY_DEBUG
3000const char *
3001il4965_get_tx_fail_reason(u32 status)
3002{
3003#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
3004#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
3005
3006 switch (status & TX_STATUS_MSK) {
3007 case TX_STATUS_SUCCESS:
3008 return "SUCCESS";
3009 TX_STATUS_POSTPONE(DELAY);
3010 TX_STATUS_POSTPONE(FEW_BYTES);
3011 TX_STATUS_POSTPONE(QUIET_PERIOD);
3012 TX_STATUS_POSTPONE(CALC_TTAK);
3013 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
3014 TX_STATUS_FAIL(SHORT_LIMIT);
3015 TX_STATUS_FAIL(LONG_LIMIT);
3016 TX_STATUS_FAIL(FIFO_UNDERRUN);
3017 TX_STATUS_FAIL(DRAIN_FLOW);
3018 TX_STATUS_FAIL(RFKILL_FLUSH);
3019 TX_STATUS_FAIL(LIFE_EXPIRE);
3020 TX_STATUS_FAIL(DEST_PS);
3021 TX_STATUS_FAIL(HOST_ABORTED);
3022 TX_STATUS_FAIL(BT_RETRY);
3023 TX_STATUS_FAIL(STA_INVALID);
3024 TX_STATUS_FAIL(FRAG_DROPPED);
3025 TX_STATUS_FAIL(TID_DISABLE);
3026 TX_STATUS_FAIL(FIFO_FLUSHED);
3027 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
3028 TX_STATUS_FAIL(PASSIVE_NO_RX);
3029 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
3030 }
3031
3032 return "UNKNOWN";
3033
3034#undef TX_STATUS_FAIL
3035#undef TX_STATUS_POSTPONE
3036}
3037#endif /* CONFIG_IWLEGACY_DEBUG */
3038
3039static struct il_link_quality_cmd *
3040il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
3041{
3042 int i, r;
3043 struct il_link_quality_cmd *link_cmd;
3044 u32 rate_flags = 0;
3045 __le32 rate_n_flags;
3046
3047 link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
3048 if (!link_cmd) {
3049 IL_ERR("Unable to allocate memory for LQ cmd.\n");
3050 return NULL;
3051 }
3052 /* Set up the rate scaling to start at selected rate, fall back
3053 * all the way down to 1M in IEEE order, and then spin on 1M */
3054 if (il->band == IEEE80211_BAND_5GHZ)
3055 r = RATE_6M_IDX;
3056 else
3057 r = RATE_1M_IDX;
3058
3059 if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
3060 rate_flags |= RATE_MCS_CCK_MSK;
3061
3062 rate_flags |=
3063 il4965_first_antenna(il->hw_params.
3064 valid_tx_ant) << RATE_MCS_ANT_POS;
3065 rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
3066 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
3067 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
3068
3069 link_cmd->general_params.single_stream_ant_msk =
3070 il4965_first_antenna(il->hw_params.valid_tx_ant);
3071
3072 link_cmd->general_params.dual_stream_ant_msk =
3073 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
3074 valid_tx_ant);
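	/* If removing the first antenna left nothing for dual-stream rates,
	 * fall back to A+B; with exactly two valid antennas use them both. */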
3075 if (!link_cmd->general_params.dual_stream_ant_msk) {
3076 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
3077 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
3078 link_cmd->general_params.dual_stream_ant_msk =
3079 il->hw_params.valid_tx_ant;
3080 }
3081
3082 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
3083 link_cmd->agg_params.agg_time_limit =
3084 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
3085
3086 link_cmd->sta_id = sta_id;
3087
3088 return link_cmd;
3089}
3090
3091/*
3092 * il4965_add_bssid_station - Add the special IBSS BSSID station
3093 *
3094 * Function sleeps.
3095 */
3096int
3097il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
3098{
3099 int ret;
3100 u8 sta_id;
3101 struct il_link_quality_cmd *link_cmd;
3102 unsigned long flags;
3103
3104 if (sta_id_r)
3105 *sta_id_r = IL_INVALID_STATION;
3106
3107 ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
3108 if (ret) {
3109 IL_ERR("Unable to add station %pM\n", addr);
3110 return ret;
3111 }
3112
3113 if (sta_id_r)
3114 *sta_id_r = sta_id;
3115
3116 spin_lock_irqsave(&il->sta_lock, flags);
3117 il->stations[sta_id].used |= IL_STA_LOCAL;
3118 spin_unlock_irqrestore(&il->sta_lock, flags);
3119
3120 /* Set up default rate scaling table in device's station table */
3121 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3122 if (!link_cmd) {
3123 IL_ERR("Unable to initialize rate scaling for station %pM.\n",
3124 addr);
3125 return -ENOMEM;
3126 }
3127
3128 ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
3129 if (ret)
3130 IL_ERR("Link quality command failed (%d)\n", ret);
3131
3132 spin_lock_irqsave(&il->sta_lock, flags);
3133 il->stations[sta_id].lq = link_cmd;
3134 spin_unlock_irqrestore(&il->sta_lock, flags);
3135
3136 return 0;
3137}
3138
3139static int
3140il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
3141{
3142 int i;
3143 u8 buff[sizeof(struct il_wep_cmd) +
3144 sizeof(struct il_wep_key) * WEP_KEYS_MAX];
3145 struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
3146 size_t cmd_size = sizeof(struct il_wep_cmd);
3147 struct il_host_cmd cmd = {
3148 .id = C_WEPKEY,
3149 .data = wep_cmd,
3150 .flags = CMD_SYNC,
3151 };
3152 bool not_empty = false;
3153
3154 might_sleep();
3155
3156 memset(wep_cmd, 0,
3157 cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
3158
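	/* Build one C_WEPKEY command covering every default WEP key slot;
	 * slots without a key get WEP_INVALID_OFFSET. */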
3159 for (i = 0; i < WEP_KEYS_MAX; i++) {
3160 u8 key_size = il->_4965.wep_keys[i].key_size;
3161
3162 wep_cmd->key[i].key_idx = i;
3163 if (key_size) {
3164 wep_cmd->key[i].key_offset = i;
3165 not_empty = true;
3166 } else
3167 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
3168
3169 wep_cmd->key[i].key_size = key_size;
3170 memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
3171 }
3172
3173 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
3174 wep_cmd->num_keys = WEP_KEYS_MAX;
3175
3176 cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
3177 cmd.len = cmd_size;
3178
3179 if (not_empty || send_if_empty)
3180 return il_send_cmd(il, &cmd);
3181 else
3182 return 0;
3183}
3184
3185int
3186il4965_restore_default_wep_keys(struct il_priv *il)
3187{
3188 lockdep_assert_held(&il->mutex);
3189
3190 return il4965_static_wepkey_cmd(il, false);
3191}
3192
3193int
3194il4965_remove_default_wep_key(struct il_priv *il,
3195 struct ieee80211_key_conf *keyconf)
3196{
3197 int ret;
3198 int idx = keyconf->keyidx;
3199
3200 lockdep_assert_held(&il->mutex);
3201
3202 D_WEP("Removing default WEP key: idx=%d\n", idx);
3203
3204 memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
3205 if (il_is_rfkill(il)) {
3206 D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
3207 /* but keys in device are clear anyway so return success */
3208 return 0;
3209 }
3210 	ret = il4965_static_wepkey_cmd(il, true);
3211 D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
3212
3213 return ret;
3214}
3215
3216int
3217il4965_set_default_wep_key(struct il_priv *il,
3218 struct ieee80211_key_conf *keyconf)
3219{
3220 int ret;
3221 int len = keyconf->keylen;
3222 int idx = keyconf->keyidx;
3223
3224 lockdep_assert_held(&il->mutex);
3225
3226 if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
3227 D_WEP("Bad WEP key length %d\n", keyconf->keylen);
3228 return -EINVAL;
3229 }
3230
3231 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
3232 keyconf->hw_key_idx = HW_KEY_DEFAULT;
3233 il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;
3234
3235 il->_4965.wep_keys[idx].key_size = len;
3236 memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);
3237
3238 ret = il4965_static_wepkey_cmd(il, false);
3239
3240 D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
3241 return ret;
3242}
3243
3244static int
3245il4965_set_wep_dynamic_key_info(struct il_priv *il,
3246 struct ieee80211_key_conf *keyconf, u8 sta_id)
3247{
3248 unsigned long flags;
3249 __le16 key_flags = 0;
3250 struct il_addsta_cmd sta_cmd;
3251
3252 lockdep_assert_held(&il->mutex);
3253
3254 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
3255
3256 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
3257 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
3258 key_flags &= ~STA_KEY_FLG_INVALID;
3259
3260 if (keyconf->keylen == WEP_KEY_LEN_128)
3261 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
3262
3263 if (sta_id == il->hw_params.bcast_id)
3264 key_flags |= STA_KEY_MULTICAST_MSK;
3265
3266 spin_lock_irqsave(&il->sta_lock, flags);
3267
3268 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
3269 il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
3270 il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
3271
3272 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
3273
3274 memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
3275 keyconf->keylen);
3276
3277 if ((il->stations[sta_id].sta.key.
3278 key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
3279 il->stations[sta_id].sta.key.key_offset =
3280 il_get_free_ucode_key_idx(il);
3281 	/* else, we are overriding an existing key => no need to allocate room
3282 * in uCode. */
3283
3284 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3285 "no space for a new key");
3286
3287 il->stations[sta_id].sta.key.key_flags = key_flags;
3288 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3289 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3290
3291 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3292 sizeof(struct il_addsta_cmd));
3293 spin_unlock_irqrestore(&il->sta_lock, flags);
3294
3295 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3296}
3297
3298static int
3299il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
3300 struct ieee80211_key_conf *keyconf, u8 sta_id)
3301{
3302 unsigned long flags;
3303 __le16 key_flags = 0;
3304 struct il_addsta_cmd sta_cmd;
3305
3306 lockdep_assert_held(&il->mutex);
3307
3308 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
3309 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
3310 key_flags &= ~STA_KEY_FLG_INVALID;
3311
3312 if (sta_id == il->hw_params.bcast_id)
3313 key_flags |= STA_KEY_MULTICAST_MSK;
3314
3315 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3316
3317 spin_lock_irqsave(&il->sta_lock, flags);
3318 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
3319 il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
3320
3321 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
3322
3323 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
3324
3325 if ((il->stations[sta_id].sta.key.
3326 key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
3327 il->stations[sta_id].sta.key.key_offset =
3328 il_get_free_ucode_key_idx(il);
3329 	/* else, we are overriding an existing key => no need to allocate room
3330 * in uCode. */
3331
3332 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3333 "no space for a new key");
3334
3335 il->stations[sta_id].sta.key.key_flags = key_flags;
3336 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3337 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3338
3339 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3340 sizeof(struct il_addsta_cmd));
3341 spin_unlock_irqrestore(&il->sta_lock, flags);
3342
3343 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3344}
3345
3346static int
3347il4965_set_tkip_dynamic_key_info(struct il_priv *il,
3348 struct ieee80211_key_conf *keyconf, u8 sta_id)
3349{
3350 unsigned long flags;
3351 int ret = 0;
3352 __le16 key_flags = 0;
3353
3354 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
3355 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
3356 key_flags &= ~STA_KEY_FLG_INVALID;
3357
3358 if (sta_id == il->hw_params.bcast_id)
3359 key_flags |= STA_KEY_MULTICAST_MSK;
3360
3361 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3362 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3363
3364 spin_lock_irqsave(&il->sta_lock, flags);
3365
3366 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
3367 il->stations[sta_id].keyinfo.keylen = 16;
3368
3369 if ((il->stations[sta_id].sta.key.
3370 key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
3371 il->stations[sta_id].sta.key.key_offset =
3372 il_get_free_ucode_key_idx(il);
3373 	/* else, we are overriding an existing key => no need to allocate room
3374 * in uCode. */
3375
3376 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3377 "no space for a new key");
3378
3379 il->stations[sta_id].sta.key.key_flags = key_flags;
3380
3381 	/* This copy is actually not needed: we get the key with each TX */
3382 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
3383
3384 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
3385
3386 spin_unlock_irqrestore(&il->sta_lock, flags);
3387
3388 return ret;
3389}
3390
3391void
3392il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
3393 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
3394{
3395 u8 sta_id;
3396 unsigned long flags;
3397 int i;
3398
3399 if (il_scan_cancel(il)) {
3400 /* cancel scan failed, just live w/ bad key and rely
3401 briefly on SW decryption */
3402 return;
3403 }
3404
3405 sta_id = il_sta_id_or_broadcast(il, sta);
3406 if (sta_id == IL_INVALID_STATION)
3407 return;
3408
3409 spin_lock_irqsave(&il->sta_lock, flags);
3410
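	/* Hand uCode the updated IV32 and RX phase-1 (TTAK) key words for
	 * this station. */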
3411 il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
3412
3413 for (i = 0; i < 5; i++)
3414 il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
3415 cpu_to_le16(phase1key[i]);
3416
3417 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3418 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3419
3420 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
3421
3422 spin_unlock_irqrestore(&il->sta_lock, flags);
3423}
3424
3425int
3426il4965_remove_dynamic_key(struct il_priv *il,
3427 struct ieee80211_key_conf *keyconf, u8 sta_id)
3428{
3429 unsigned long flags;
3430 u16 key_flags;
3431 u8 keyidx;
3432 struct il_addsta_cmd sta_cmd;
3433
3434 lockdep_assert_held(&il->mutex);
3435
3436 il->_4965.key_mapping_keys--;
3437
3438 spin_lock_irqsave(&il->sta_lock, flags);
3439 key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
3440 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
3441
3442 D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
3443
3444 if (keyconf->keyidx != keyidx) {
3445 		/* We need to remove a key with an idx different from the one
3446 		 * in the uCode. This means that the key we need to remove has
3447 		 * been replaced by another one with a different idx.
3448 		 * Don't do anything and return OK.
3449 */
3450 spin_unlock_irqrestore(&il->sta_lock, flags);
3451 return 0;
3452 }
3453
3454 if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
3455 IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
3456 key_flags);
3457 spin_unlock_irqrestore(&il->sta_lock, flags);
3458 return 0;
3459 }
3460
3461 if (!test_and_clear_bit
3462 (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
3463 IL_ERR("idx %d not used in uCode key table.\n",
3464 il->stations[sta_id].sta.key.key_offset);
3465 memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
3466 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
3467 il->stations[sta_id].sta.key.key_flags =
3468 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
3469 il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
3470 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3471 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3472
3473 if (il_is_rfkill(il)) {
3474 D_WEP
3475 ("Not sending C_ADD_STA command because RFKILL enabled.\n");
3476 spin_unlock_irqrestore(&il->sta_lock, flags);
3477 return 0;
3478 }
3479 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3480 sizeof(struct il_addsta_cmd));
3481 spin_unlock_irqrestore(&il->sta_lock, flags);
3482
3483 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3484}
3485
3486int
3487il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
3488 u8 sta_id)
3489{
3490 int ret;
3491
3492 lockdep_assert_held(&il->mutex);
3493
3494 il->_4965.key_mapping_keys++;
3495 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3496
3497 switch (keyconf->cipher) {
3498 case WLAN_CIPHER_SUITE_CCMP:
3499 ret =
3500 il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
3501 break;
3502 case WLAN_CIPHER_SUITE_TKIP:
3503 ret =
3504 il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
3505 break;
3506 case WLAN_CIPHER_SUITE_WEP40:
3507 case WLAN_CIPHER_SUITE_WEP104:
3508 ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
3509 break;
3510 default:
3511 IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
3512 keyconf->cipher);
3513 ret = -EINVAL;
3514 }
3515
3516 D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3517 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
3518
3519 return ret;
3520}
3521
3522/**
3523 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
3524 *
3525 * This adds the broadcast station into the driver's station table
3526 * and marks it driver active, so that it will be restored to the
3527 * device at the next best time.
3528 */
3529int
3530il4965_alloc_bcast_station(struct il_priv *il)
3531{
3532 struct il_link_quality_cmd *link_cmd;
3533 unsigned long flags;
3534 u8 sta_id;
3535
3536 spin_lock_irqsave(&il->sta_lock, flags);
3537 sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
3538 if (sta_id == IL_INVALID_STATION) {
3539 IL_ERR("Unable to prepare broadcast station\n");
3540 spin_unlock_irqrestore(&il->sta_lock, flags);
3541
3542 return -EINVAL;
3543 }
3544
3545 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
3546 il->stations[sta_id].used |= IL_STA_BCAST;
3547 spin_unlock_irqrestore(&il->sta_lock, flags);
3548
3549 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3550 if (!link_cmd) {
3551 IL_ERR
3552 ("Unable to initialize rate scaling for bcast station.\n");
3553 return -ENOMEM;
3554 }
3555
3556 spin_lock_irqsave(&il->sta_lock, flags);
3557 il->stations[sta_id].lq = link_cmd;
3558 spin_unlock_irqrestore(&il->sta_lock, flags);
3559
3560 return 0;
3561}
3562
3563/**
3564 * il4965_update_bcast_station - update broadcast station's LQ command
3565 *
3566 * Only used by iwl4965. Placed here to have all bcast station management
3567 * code together.
3568 */
3569static int
3570il4965_update_bcast_station(struct il_priv *il)
3571{
3572 unsigned long flags;
3573 struct il_link_quality_cmd *link_cmd;
3574 u8 sta_id = il->hw_params.bcast_id;
3575
3576 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3577 if (!link_cmd) {
3578 IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
3579 return -ENOMEM;
3580 }
3581
3582 spin_lock_irqsave(&il->sta_lock, flags);
3583 if (il->stations[sta_id].lq)
3584 kfree(il->stations[sta_id].lq);
3585 else
3586 D_INFO("Bcast sta rate scaling has not been initialized.\n");
3587 il->stations[sta_id].lq = link_cmd;
3588 spin_unlock_irqrestore(&il->sta_lock, flags);
3589
3590 return 0;
3591}
3592
3593int
3594il4965_update_bcast_stations(struct il_priv *il)
3595{
3596 return il4965_update_bcast_station(il);
3597}
3598
3599/**
3600 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
3601 */
3602int
3603il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
3604{
3605 unsigned long flags;
3606 struct il_addsta_cmd sta_cmd;
3607
3608 lockdep_assert_held(&il->mutex);
3609
3610 /* Remove "disable" flag, to enable Tx for this TID */
3611 spin_lock_irqsave(&il->sta_lock, flags);
3612 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
3613 il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
3614 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3615 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3616 sizeof(struct il_addsta_cmd));
3617 spin_unlock_irqrestore(&il->sta_lock, flags);
3618
3619 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3620}
3621
3622int
3623il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
3624 u16 ssn)
3625{
3626 unsigned long flags;
3627 int sta_id;
3628 struct il_addsta_cmd sta_cmd;
3629
3630 lockdep_assert_held(&il->mutex);
3631
3632 sta_id = il_sta_id(sta);
3633 if (sta_id == IL_INVALID_STATION)
3634 return -ENXIO;
3635
3636 spin_lock_irqsave(&il->sta_lock, flags);
3637 il->stations[sta_id].sta.station_flags_msk = 0;
3638 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
3639 il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
3640 il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
3641 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3642 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3643 sizeof(struct il_addsta_cmd));
3644 spin_unlock_irqrestore(&il->sta_lock, flags);
3645
3646 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3647}
3648
3649int
3650il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
3651{
3652 unsigned long flags;
3653 int sta_id;
3654 struct il_addsta_cmd sta_cmd;
3655
3656 lockdep_assert_held(&il->mutex);
3657
3658 sta_id = il_sta_id(sta);
3659 if (sta_id == IL_INVALID_STATION) {
3660 IL_ERR("Invalid station for AGG tid %d\n", tid);
3661 return -ENXIO;
3662 }
3663
3664 spin_lock_irqsave(&il->sta_lock, flags);
3665 il->stations[sta_id].sta.station_flags_msk = 0;
3666 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
3667 il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
3668 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3669 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3670 sizeof(struct il_addsta_cmd));
3671 spin_unlock_irqrestore(&il->sta_lock, flags);
3672
3673 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3674}
3675
3676void
3677il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
3678{
3679 unsigned long flags;
3680
3681 spin_lock_irqsave(&il->sta_lock, flags);
3682 il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
3683 il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3684 il->stations[sta_id].sta.sta.modify_mask =
3685 STA_MODIFY_SLEEP_TX_COUNT_MSK;
3686 il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
3687 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3688 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
3689 spin_unlock_irqrestore(&il->sta_lock, flags);
3690
3691}
3692
3693void
3694il4965_update_chain_flags(struct il_priv *il)
3695{
3696 if (il->ops->set_rxon_chain) {
3697 il->ops->set_rxon_chain(il);
3698 if (il->active.rx_chain != il->staging.rx_chain)
3699 il_commit_rxon(il);
3700 }
3701}
3702
3703static void
3704il4965_clear_free_frames(struct il_priv *il)
3705{
3706 struct list_head *element;
3707
3708 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
3709
3710 while (!list_empty(&il->free_frames)) {
3711 element = il->free_frames.next;
3712 list_del(element);
3713 kfree(list_entry(element, struct il_frame, list));
3714 il->frames_count--;
3715 }
3716
3717 if (il->frames_count) {
3718 IL_WARN("%d frames still in use. Did we lose one?\n",
3719 il->frames_count);
3720 il->frames_count = 0;
3721 }
3722}
3723
3724static struct il_frame *
3725il4965_get_free_frame(struct il_priv *il)
3726{
3727 struct il_frame *frame;
3728 struct list_head *element;
3729 if (list_empty(&il->free_frames)) {
3730 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3731 if (!frame) {
3732 IL_ERR("Could not allocate frame!\n");
3733 return NULL;
3734 }
3735
3736 il->frames_count++;
3737 return frame;
3738 }
3739
3740 element = il->free_frames.next;
3741 list_del(element);
3742 return list_entry(element, struct il_frame, list);
3743}
3744
3745static void
3746il4965_free_frame(struct il_priv *il, struct il_frame *frame)
3747{
3748 memset(frame, 0, sizeof(*frame));
3749 list_add(&frame->list, &il->free_frames);
3750}
3751
3752static u32
3753il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
3754 int left)
3755{
3756 lockdep_assert_held(&il->mutex);
3757
3758 if (!il->beacon_skb)
3759 return 0;
3760
3761 if (il->beacon_skb->len > left)
3762 return 0;
3763
3764 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3765
3766 return il->beacon_skb->len;
3767}
3768
3769/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
3770static void
3771il4965_set_beacon_tim(struct il_priv *il,
3772 struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
3773 u32 frame_size)
3774{
3775 u16 tim_idx;
3776 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
3777
3778 /*
3779 * The idx is relative to frame start but we start looking at the
3780 * variable-length part of the beacon.
3781 */
3782 tim_idx = mgmt->u.beacon.variable - beacon;
3783
3784 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
3785 while ((tim_idx < (frame_size - 2)) &&
3786 (beacon[tim_idx] != WLAN_EID_TIM))
3787 tim_idx += beacon[tim_idx + 1] + 2;
3788
3789 /* If TIM field was found, set variables */
3790 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
3791 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
3792 tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
3793 } else
3794 IL_WARN("Unable to find TIM Element in beacon\n");
3795}
3796
3797static unsigned int
3798il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
3799{
3800 struct il_tx_beacon_cmd *tx_beacon_cmd;
3801 u32 frame_size;
3802 u32 rate_flags;
3803 u32 rate;
3804 /*
3805 * We have to set up the TX command, the TX Beacon command, and the
3806 * beacon contents.
3807 */
3808
3809 lockdep_assert_held(&il->mutex);
3810
3811 if (!il->beacon_enabled) {
3812 IL_ERR("Trying to build beacon without beaconing enabled\n");
3813 return 0;
3814 }
3815
3816 /* Initialize memory */
3817 tx_beacon_cmd = &frame->u.beacon;
3818 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
3819
3820 /* Set up TX beacon contents */
3821 frame_size =
3822 il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
3823 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
3824 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
3825 return 0;
3826 if (!frame_size)
3827 return 0;
3828
3829 /* Set up TX command fields */
3830 tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
3831 tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
3832 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
3833 tx_beacon_cmd->tx.tx_flags =
3834 TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
3835 TX_CMD_FLG_STA_RATE_MSK;
3836
3837 /* Set up TX beacon command fields */
3838 il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
3839 frame_size);
3840
3841 /* Set up packet rate and flags */
3842 rate = il_get_lowest_plcp(il);
3843 il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
3844 rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
3845 if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
3846 rate_flags |= RATE_MCS_CCK_MSK;
3847 tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);
3848
3849 return sizeof(*tx_beacon_cmd) + frame_size;
3850}
3851
3852int
3853il4965_send_beacon_cmd(struct il_priv *il)
3854{
3855 struct il_frame *frame;
3856 unsigned int frame_size;
3857 int rc;
3858
3859 frame = il4965_get_free_frame(il);
3860 if (!frame) {
3861 IL_ERR("Could not obtain free frame buffer for beacon "
3862 "command.\n");
3863 return -ENOMEM;
3864 }
3865
3866 frame_size = il4965_hw_get_beacon_cmd(il, frame);
3867 if (!frame_size) {
3868 IL_ERR("Error configuring the beacon command\n");
3869 il4965_free_frame(il, frame);
3870 return -EINVAL;
3871 }
3872
3873 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
3874
3875 il4965_free_frame(il, frame);
3876
3877 return rc;
3878}
3879
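/*
 * TFD transfer-buffer (TB) entries pack a 36-bit DMA address and a
 * 12-bit length: the low 32 address bits live in tb->lo, while
 * hi_n_len carries the upper 4 address bits in its low nibble and the
 * length shifted left by 4 in the remaining bits.  The helpers below
 * encode and decode that layout.
 */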
3880static inline dma_addr_t
3881il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
3882{
3883 struct il_tfd_tb *tb = &tfd->tbs[idx];
3884
3885 dma_addr_t addr = get_unaligned_le32(&tb->lo);
3886 if (sizeof(dma_addr_t) > sizeof(u32))
3887 addr |=
3888 ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
3889 16;
3890
3891 return addr;
3892}
3893
3894static inline u16
3895il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
3896{
3897 struct il_tfd_tb *tb = &tfd->tbs[idx];
3898
3899 return le16_to_cpu(tb->hi_n_len) >> 4;
3900}
3901
3902static inline void
3903il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
3904{
3905 struct il_tfd_tb *tb = &tfd->tbs[idx];
3906 u16 hi_n_len = len << 4;
3907
3908 put_unaligned_le32(addr, &tb->lo);
3909 if (sizeof(dma_addr_t) > sizeof(u32))
3910 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
3911
3912 tb->hi_n_len = cpu_to_le16(hi_n_len);
3913
3914 tfd->num_tbs = idx + 1;
3915}
3916
3917static inline u8
3918il4965_tfd_get_num_tbs(struct il_tfd *tfd)
3919{
3920 return tfd->num_tbs & 0x1f;
3921}
3922
3923/**
3924 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
3925  * @il: driver private data
3926  * @txq: tx queue
3927 *
3928 * Does NOT advance any TFD circular buffer read/write idxes
3929 * Does NOT free the TFD itself (which is within circular buffer)
3930 */
3931void
3932il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
3933{
3934 struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
3935 struct il_tfd *tfd;
3936 struct pci_dev *dev = il->pci_dev;
3937 int idx = txq->q.read_ptr;
3938 int i;
3939 int num_tbs;
3940
3941 tfd = &tfd_tmp[idx];
3942
3943 /* Sanity check on number of chunks */
3944 num_tbs = il4965_tfd_get_num_tbs(tfd);
3945
3946 if (num_tbs >= IL_NUM_OF_TBS) {
3947 IL_ERR("Too many chunks: %i\n", num_tbs);
3948		/* @todo issue fatal error, it is quite a serious situation */
3949 return;
3950 }
3951
3952 /* Unmap tx_cmd */
3953 if (num_tbs)
3954 pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
3955 dma_unmap_len(&txq->meta[idx], len),
3956 PCI_DMA_BIDIRECTIONAL);
3957
3958 /* Unmap chunks, if any. */
3959 for (i = 1; i < num_tbs; i++)
3960 pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
3961 il4965_tfd_tb_get_len(tfd, i),
3962 PCI_DMA_TODEVICE);
3963
3964 /* free SKB */
3965 if (txq->skbs) {
3966 struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
3967
3968 /* can be called from irqs-disabled context */
3969 if (skb) {
3970 dev_kfree_skb_any(skb);
3971 txq->skbs[txq->q.read_ptr] = NULL;
3972 }
3973 }
3974}
3975
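/*
 * Attach one DMA chunk (addr/len) to the TFD at the queue's write
 * pointer.  When 'reset' is set the TFD is zeroed first, so the chunk
 * becomes the first TB; otherwise it is appended after the existing
 * TBs, up to IL_NUM_OF_TBS entries per TFD.
 */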
3976int
3977il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
3978 dma_addr_t addr, u16 len, u8 reset, u8 pad)
3979{
3980 struct il_queue *q;
3981 struct il_tfd *tfd, *tfd_tmp;
3982 u32 num_tbs;
3983
3984 q = &txq->q;
3985 tfd_tmp = (struct il_tfd *)txq->tfds;
3986 tfd = &tfd_tmp[q->write_ptr];
3987
3988 if (reset)
3989 memset(tfd, 0, sizeof(*tfd));
3990
3991 num_tbs = il4965_tfd_get_num_tbs(tfd);
3992
3993	/* Each TFD can point to a maximum of 20 Tx buffers */
3994 if (num_tbs >= IL_NUM_OF_TBS) {
3995 IL_ERR("Error can not send more than %d chunks\n",
3996 IL_NUM_OF_TBS);
3997 return -EINVAL;
3998 }
3999
4000 BUG_ON(addr & ~DMA_BIT_MASK(36));
4001 if (unlikely(addr & ~IL_TX_DMA_MASK))
4002 IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
4003
4004 il4965_tfd_set_tb(tfd, num_tbs, addr, len);
4005
4006 return 0;
4007}
4008
4009/*
4010 * Tell nic where to find circular buffer of Tx Frame Descriptors for
4011 * given Tx queue, and enable the DMA channel used for that queue.
4012 *
4013 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
4014 * channels supported in hardware.
4015 */
4016int
4017il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
4018{
4019 int txq_id = txq->q.id;
4020
4021 /* Circular buffer (TFD queue in DRAM) physical base address */
4022 il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
4023
4024 return 0;
4025}
4026
4027/******************************************************************************
4028 *
4029 * Generic RX handler implementations
4030 *
4031 ******************************************************************************/
4032static void
4033il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
4034{
4035 struct il_rx_pkt *pkt = rxb_addr(rxb);
4036 struct il_alive_resp *palive;
4037 struct delayed_work *pwork;
4038
4039 palive = &pkt->u.alive_frame;
4040
4041 D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
4042 palive->is_valid, palive->ver_type, palive->ver_subtype);
4043
4044 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
4045 D_INFO("Initialization Alive received.\n");
4046 memcpy(&il->card_alive_init, &pkt->u.alive_frame,
4047 sizeof(struct il_init_alive_resp));
4048 pwork = &il->init_alive_start;
4049 } else {
4050 D_INFO("Runtime Alive received.\n");
4051 memcpy(&il->card_alive, &pkt->u.alive_frame,
4052 sizeof(struct il_alive_resp));
4053 pwork = &il->alive_start;
4054 }
4055
4056 /* We delay the ALIVE response by 5ms to
4057 * give the HW RF Kill time to activate... */
4058 if (palive->is_valid == UCODE_VALID_OK)
4059 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
4060 else
4061 IL_WARN("uCode did not respond OK.\n");
4062}
4063
4064/**
4065 * il4965_bg_stats_periodic - Timer callback to queue stats
4066 *
4067 * This callback is provided in order to send a stats request.
4068 *
4069 * This timer function is continually reset to execute within
4070 * 60 seconds since the last N_STATS was received. We need to
4071 * ensure we receive the stats in order to update the temperature
4072 * used for calibrating the TXPOWER.
4073 */
4074static void
4075il4965_bg_stats_periodic(unsigned long data)
4076{
4077 struct il_priv *il = (struct il_priv *)data;
4078
4079 if (test_bit(S_EXIT_PENDING, &il->status))
4080 return;
4081
4082	/* don't send host commands if rf-kill is on */
4083 if (!il_is_ready_rf(il))
4084 return;
4085
4086 il_send_stats_request(il, CMD_ASYNC, false);
4087}
4088
4089static void
4090il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
4091{
4092 struct il_rx_pkt *pkt = rxb_addr(rxb);
4093 struct il4965_beacon_notif *beacon =
4094 (struct il4965_beacon_notif *)pkt->u.raw;
4095#ifdef CONFIG_IWLEGACY_DEBUG
4096 u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
4097
4098 D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
4099 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
4100 beacon->beacon_notify_hdr.failure_frame,
4101 le32_to_cpu(beacon->ibss_mgr_status),
4102 le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
4103#endif
4104 il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
4105}
4106
4107static void
4108il4965_perform_ct_kill_task(struct il_priv *il)
4109{
4110 unsigned long flags;
4111
4112 D_POWER("Stop all queues\n");
4113
4114 if (il->mac80211_registered)
4115 ieee80211_stop_queues(il->hw);
4116
4117 _il_wr(il, CSR_UCODE_DRV_GP1_SET,
4118 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
4119 _il_rd(il, CSR_UCODE_DRV_GP1);
4120
4121 spin_lock_irqsave(&il->reg_lock, flags);
4122 if (likely(_il_grab_nic_access(il)))
4123 _il_release_nic_access(il);
4124 spin_unlock_irqrestore(&il->reg_lock, flags);
4125}
4126
4127/* Handle notification from uCode that card's power state is changing
4128 * due to software, hardware, or critical temperature RFKILL */
4129static void
4130il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
4131{
4132 struct il_rx_pkt *pkt = rxb_addr(rxb);
4133 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
4134 unsigned long status = il->status;
4135
4136 D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
4137 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
4138 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
4139 (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");
4140
4141 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {
4142
4143 _il_wr(il, CSR_UCODE_DRV_GP1_SET,
4144 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4145
4146 il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4147
4148 if (!(flags & RXON_CARD_DISABLED)) {
4149 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
4150 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4151 il_wr(il, HBUS_TARG_MBX_C,
4152 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4153 }
4154 }
4155
4156 if (flags & CT_CARD_DISABLED)
4157 il4965_perform_ct_kill_task(il);
4158
4159 if (flags & HW_CARD_DISABLED)
4160 set_bit(S_RFKILL, &il->status);
4161 else
4162 clear_bit(S_RFKILL, &il->status);
4163
4164 if (!(flags & RXON_CARD_DISABLED))
4165 il_scan_cancel(il);
4166
4167 if ((test_bit(S_RFKILL, &status) !=
4168 test_bit(S_RFKILL, &il->status)))
4169 wiphy_rfkill_set_hw_state(il->hw->wiphy,
4170 test_bit(S_RFKILL, &il->status));
4171 else
4172 wake_up(&il->wait_command_queue);
4173}
4174
4175/**
4176 * il4965_setup_handlers - Initialize Rx handler callbacks
4177 *
4178 * Setup the RX handlers for each of the reply types sent from the uCode
4179 * to the host.
4180 *
4181 * This function chains into the hardware specific files for them to setup
4182 * any hardware specific handlers as well.
4183 */
4184static void
4185il4965_setup_handlers(struct il_priv *il)
4186{
4187 il->handlers[N_ALIVE] = il4965_hdl_alive;
4188 il->handlers[N_ERROR] = il_hdl_error;
4189 il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
4190 il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
4191 il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
4192 il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
4193 il->handlers[N_BEACON] = il4965_hdl_beacon;
4194
4195 /*
4196 * The same handler is used for both the REPLY to a discrete
4197 * stats request from the host as well as for the periodic
4198 * stats notifications (after received beacons) from the uCode.
4199 */
4200 il->handlers[C_STATS] = il4965_hdl_c_stats;
4201 il->handlers[N_STATS] = il4965_hdl_stats;
4202
4203 il_setup_rx_scan_handlers(il);
4204
4205 /* status change handler */
4206 il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
4207
4208 il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
4209 /* Rx handlers */
4210 il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
4211 il->handlers[N_RX_MPDU] = il4965_hdl_rx;
4212 il->handlers[N_RX] = il4965_hdl_rx;
4213 /* block ack */
4214 il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
4215 /* Tx response */
4216 il->handlers[C_TX] = il4965_hdl_tx;
4217}
4218
4219/**
4220 * il4965_rx_handle - Main entry function for receiving responses from uCode
4221 *
4222 * Uses the il->handlers callback function array to invoke
4223 * the appropriate handlers, including command responses,
4224 * frame-received notifications, and other notifications.
4225 */
4226void
4227il4965_rx_handle(struct il_priv *il)
4228{
4229 struct il_rx_buf *rxb;
4230 struct il_rx_pkt *pkt;
4231 struct il_rx_queue *rxq = &il->rxq;
4232 u32 r, i;
4233 int reclaim;
4234 unsigned long flags;
4235 u8 fill_rx = 0;
4236 u32 count = 8;
4237 int total_empty;
4238
4239 /* uCode's read idx (stored in shared DRAM) indicates the last Rx
4240 * buffer that the driver may process (last buffer filled by ucode). */
4241 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
4242 i = rxq->read;
4243
4244 /* Rx interrupt, but nothing sent from uCode */
4245 if (i == r)
4246 D_RX("r = %d, i = %d\n", r, i);
4247
4248 /* calculate total frames need to be restock after handling RX */
4249 total_empty = r - rxq->write_actual;
4250 if (total_empty < 0)
4251 total_empty += RX_QUEUE_SIZE;
4252
4253 if (total_empty > (RX_QUEUE_SIZE / 2))
4254 fill_rx = 1;
4255
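	/*
	 * Walk the RB ring from the driver's read idx up to the uCode's
	 * closed_rb_num, unmapping each buffer and dispatching its packet
	 * to the matching handler; if the queue had drained past the
	 * halfway mark, restock it every 8 processed buffers.
	 */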
4256 while (i != r) {
4257 int len;
4258
4259 rxb = rxq->queue[i];
4260
4261 /* If an RXB doesn't have a Rx queue slot associated with it,
4262 * then a bug has been introduced in the queue refilling
4263 * routines -- catch it here */
4264 BUG_ON(rxb == NULL);
4265
4266 rxq->queue[i] = NULL;
4267
4268 pci_unmap_page(il->pci_dev, rxb->page_dma,
4269 PAGE_SIZE << il->hw_params.rx_page_order,
4270 PCI_DMA_FROMDEVICE);
4271 pkt = rxb_addr(rxb);
4272
4273 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4274 len += sizeof(u32); /* account for status word */
4275
4276 reclaim = il_need_reclaim(il, pkt);
4277
4278 /* Based on type of command response or notification,
4279 * handle those that need handling via function in
4280 * handlers table. See il4965_setup_handlers() */
4281 if (il->handlers[pkt->hdr.cmd]) {
4282 D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
4283 il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4284 il->isr_stats.handlers[pkt->hdr.cmd]++;
4285 il->handlers[pkt->hdr.cmd] (il, rxb);
4286 } else {
4287 /* No handling needed */
4288 D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
4289 i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4290 }
4291
4292 /*
4293 * XXX: After here, we should always check rxb->page
4294 * against NULL before touching it or its virtual
4295 * memory (pkt). Because some handler might have
4296 * already taken or freed the pages.
4297 */
4298
4299 if (reclaim) {
4300 /* Invoke any callbacks, transfer the buffer to caller,
4301 * and fire off the (possibly) blocking il_send_cmd()
4302 * as we reclaim the driver command queue */
4303 if (rxb->page)
4304 il_tx_cmd_complete(il, rxb);
4305 else
4306 IL_WARN("Claim null rxb?\n");
4307 }
4308
4309 /* Reuse the page if possible. For notification packets and
4310 * SKBs that fail to Rx correctly, add them back into the
4311 * rx_free list for reuse later. */
4312 spin_lock_irqsave(&rxq->lock, flags);
4313 if (rxb->page != NULL) {
4314 rxb->page_dma =
4315 pci_map_page(il->pci_dev, rxb->page, 0,
4316 PAGE_SIZE << il->hw_params.
4317 rx_page_order, PCI_DMA_FROMDEVICE);
4318
4319 if (unlikely(pci_dma_mapping_error(il->pci_dev,
4320 rxb->page_dma))) {
4321 __il_free_pages(il, rxb->page);
4322 rxb->page = NULL;
4323 list_add_tail(&rxb->list, &rxq->rx_used);
4324 } else {
4325 list_add_tail(&rxb->list, &rxq->rx_free);
4326 rxq->free_count++;
4327 }
4328 } else
4329 list_add_tail(&rxb->list, &rxq->rx_used);
4330
4331 spin_unlock_irqrestore(&rxq->lock, flags);
4332
4333 i = (i + 1) & RX_QUEUE_MASK;
4334 /* If there are a lot of unused frames,
4335		 * restock the Rx queue so the uCode won't assert. */
4336 if (fill_rx) {
4337 count++;
4338 if (count >= 8) {
4339 rxq->read = i;
4340 il4965_rx_replenish_now(il);
4341 count = 0;
4342 }
4343 }
4344 }
4345
4346 /* Backtrack one entry */
4347 rxq->read = i;
4348 if (fill_rx)
4349 il4965_rx_replenish_now(il);
4350 else
4351 il4965_rx_queue_restock(il);
4352}
4353
4354/* call this function to flush any scheduled tasklet */
4355static inline void
4356il4965_synchronize_irq(struct il_priv *il)
4357{
4358 /* wait to make sure we flush pending tasklet */
4359 synchronize_irq(il->pci_dev->irq);
4360 tasklet_kill(&il->irq_tasklet);
4361}
4362
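/*
 * Interrupt bottom half: ack and clear CSR_INT and CSR_FH_INT_STATUS,
 * then service each pending cause in turn (HW error, RF-kill, CT-kill,
 * SW error, wakeup, Rx, uCode load) and finally re-enable interrupts
 * if they were only disabled by the ISR.
 */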
4363static void
4364il4965_irq_tasklet(struct il_priv *il)
4365{
4366 u32 inta, handled = 0;
4367 u32 inta_fh;
4368 unsigned long flags;
4369 u32 i;
4370#ifdef CONFIG_IWLEGACY_DEBUG
4371 u32 inta_mask;
4372#endif
4373
4374 spin_lock_irqsave(&il->lock, flags);
4375
4376 /* Ack/clear/reset pending uCode interrupts.
4377 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4378 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
4379 inta = _il_rd(il, CSR_INT);
4380 _il_wr(il, CSR_INT, inta);
4381
4382 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4383 * Any new interrupts that happen after this, either while we're
4384 * in this tasklet, or later, will show up in next ISR/tasklet. */
4385 inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
4386 _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
4387
4388#ifdef CONFIG_IWLEGACY_DEBUG
4389 if (il_get_debug_level(il) & IL_DL_ISR) {
4390 /* just for debug */
4391 inta_mask = _il_rd(il, CSR_INT_MASK);
4392 D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
4393 inta_mask, inta_fh);
4394 }
4395#endif
4396
4397 spin_unlock_irqrestore(&il->lock, flags);
4398
4399 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
4400 * atomic, make sure that inta covers all the interrupts that
4401 * we've discovered, even if FH interrupt came in just after
4402 * reading CSR_INT. */
4403 if (inta_fh & CSR49_FH_INT_RX_MASK)
4404 inta |= CSR_INT_BIT_FH_RX;
4405 if (inta_fh & CSR49_FH_INT_TX_MASK)
4406 inta |= CSR_INT_BIT_FH_TX;
4407
4408 /* Now service all interrupt bits discovered above. */
4409 if (inta & CSR_INT_BIT_HW_ERR) {
4410 IL_ERR("Hardware error detected. Restarting.\n");
4411
4412 /* Tell the device to stop sending interrupts */
4413 il_disable_interrupts(il);
4414
4415 il->isr_stats.hw++;
4416 il_irq_handle_error(il);
4417
4418 handled |= CSR_INT_BIT_HW_ERR;
4419
4420 return;
4421 }
4422#ifdef CONFIG_IWLEGACY_DEBUG
4423 if (il_get_debug_level(il) & (IL_DL_ISR)) {
4424 /* NIC fires this, but we don't use it, redundant with WAKEUP */
4425 if (inta & CSR_INT_BIT_SCD) {
4426			D_ISR("Scheduler finished transmitting "
4427			      "the frame(s).\n");
4428 il->isr_stats.sch++;
4429 }
4430
4431 /* Alive notification via Rx interrupt will do the real work */
4432 if (inta & CSR_INT_BIT_ALIVE) {
4433 D_ISR("Alive interrupt\n");
4434 il->isr_stats.alive++;
4435 }
4436 }
4437#endif
4438 /* Safely ignore these bits for debug checks below */
4439 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
4440
4441 /* HW RF KILL switch toggled */
4442 if (inta & CSR_INT_BIT_RF_KILL) {
4443 int hw_rf_kill = 0;
4444
4445 if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4446 hw_rf_kill = 1;
4447
4448 IL_WARN("RF_KILL bit toggled to %s.\n",
4449 hw_rf_kill ? "disable radio" : "enable radio");
4450
4451 il->isr_stats.rfkill++;
4452
4453		/* The driver only loads the uCode when setting the interface up,
4454		 * and it allows loading the uCode even if the radio
4455		 * is killed. Hence update the killswitch state here. The
4456		 * rfkill handler will take care of restarting if needed.
4457		 */
4458 if (hw_rf_kill) {
4459 set_bit(S_RFKILL, &il->status);
4460 } else {
4461 clear_bit(S_RFKILL, &il->status);
4462 il_force_reset(il, true);
4463 }
4464 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
4465
4466 handled |= CSR_INT_BIT_RF_KILL;
4467 }
4468
4469 /* Chip got too hot and stopped itself */
4470 if (inta & CSR_INT_BIT_CT_KILL) {
4471 IL_ERR("Microcode CT kill error detected.\n");
4472 il->isr_stats.ctkill++;
4473 handled |= CSR_INT_BIT_CT_KILL;
4474 }
4475
4476 /* Error detected by uCode */
4477 if (inta & CSR_INT_BIT_SW_ERR) {
4478		IL_ERR("Microcode SW error detected. Restarting 0x%X.\n",
4479 inta);
4480 il->isr_stats.sw++;
4481 il_irq_handle_error(il);
4482 handled |= CSR_INT_BIT_SW_ERR;
4483 }
4484
4485 /*
4486 * uCode wakes up after power-down sleep.
4487 * Tell device about any new tx or host commands enqueued,
4488 * and about any Rx buffers made available while asleep.
4489 */
4490 if (inta & CSR_INT_BIT_WAKEUP) {
4491 D_ISR("Wakeup interrupt\n");
4492 il_rx_queue_update_write_ptr(il, &il->rxq);
4493 for (i = 0; i < il->hw_params.max_txq_num; i++)
4494 il_txq_update_write_ptr(il, &il->txq[i]);
4495 il->isr_stats.wakeup++;
4496 handled |= CSR_INT_BIT_WAKEUP;
4497 }
4498
4499 /* All uCode command responses, including Tx command responses,
4500 * Rx "responses" (frame-received notification), and other
4501	 * notifications from uCode come through here */
4502 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
4503 il4965_rx_handle(il);
4504 il->isr_stats.rx++;
4505 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4506 }
4507
4508 /* This "Tx" DMA channel is used only for loading uCode */
4509 if (inta & CSR_INT_BIT_FH_TX) {
4510 D_ISR("uCode load interrupt\n");
4511 il->isr_stats.tx++;
4512 handled |= CSR_INT_BIT_FH_TX;
4513 /* Wake up uCode load routine, now that load is complete */
4514 il->ucode_write_complete = 1;
4515 wake_up(&il->wait_command_queue);
4516 }
4517
4518 if (inta & ~handled) {
4519 IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
4520 il->isr_stats.unhandled++;
4521 }
4522
4523 if (inta & ~(il->inta_mask)) {
4524 IL_WARN("Disabled INTA bits 0x%08x were pending\n",
4525 inta & ~il->inta_mask);
4526 IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
4527 }
4528
4529 /* Re-enable all interrupts */
4530 /* only Re-enable if disabled by irq */
4531 if (test_bit(S_INT_ENABLED, &il->status))
4532 il_enable_interrupts(il);
4533 /* Re-enable RF_KILL if it occurred */
4534 else if (handled & CSR_INT_BIT_RF_KILL)
4535 il_enable_rfkill_int(il);
4536
4537#ifdef CONFIG_IWLEGACY_DEBUG
4538 if (il_get_debug_level(il) & (IL_DL_ISR)) {
4539 inta = _il_rd(il, CSR_INT);
4540 inta_mask = _il_rd(il, CSR_INT_MASK);
4541 inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
4542 D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
4543 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
4544 }
4545#endif
4546}
4547
4548/*****************************************************************************
4549 *
4550 * sysfs attributes
4551 *
4552 *****************************************************************************/
4553
4554#ifdef CONFIG_IWLEGACY_DEBUG
4555
4556/*
4557 * The following adds a new attribute to the sysfs representation
4558 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
4559 * used for controlling the debug level.
4560 *
4561 * See the debug level definitions in the driver for details.
4562 *
4563 * The debug_level being managed using sysfs below is a per device debug
4564 * level that is used instead of the global debug level if it (the per
4565 * device debug level) is set.
4566 */
4567static ssize_t
4568il4965_show_debug_level(struct device *d, struct device_attribute *attr,
4569 char *buf)
4570{
4571 struct il_priv *il = dev_get_drvdata(d);
4572 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
4573}
4574
4575static ssize_t
4576il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4577 const char *buf, size_t count)
4578{
4579 struct il_priv *il = dev_get_drvdata(d);
4580 unsigned long val;
4581 int ret;
4582
4583 ret = kstrtoul(buf, 0, &val);
4584 if (ret)
4585 IL_ERR("%s is not in hex or decimal form.\n", buf);
4586 else
4587 il->debug_level = val;
4588
4589 return strnlen(buf, count);
4590}
4591
4592static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
4593 il4965_store_debug_level);
4594
4595#endif /* CONFIG_IWLEGACY_DEBUG */
4596
4597static ssize_t
4598il4965_show_temperature(struct device *d, struct device_attribute *attr,
4599 char *buf)
4600{
4601 struct il_priv *il = dev_get_drvdata(d);
4602
4603 if (!il_is_alive(il))
4604 return -EAGAIN;
4605
4606 return sprintf(buf, "%d\n", il->temperature);
4607}
4608
4609static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
4610
4611static ssize_t
4612il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
4613{
4614 struct il_priv *il = dev_get_drvdata(d);
4615
4616 if (!il_is_ready_rf(il))
4617 return sprintf(buf, "off\n");
4618 else
4619 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
4620}
4621
4622static ssize_t
4623il4965_store_tx_power(struct device *d, struct device_attribute *attr,
4624 const char *buf, size_t count)
4625{
4626 struct il_priv *il = dev_get_drvdata(d);
4627 unsigned long val;
4628 int ret;
4629
4630 ret = kstrtoul(buf, 10, &val);
4631 if (ret)
4632 IL_INFO("%s is not in decimal form.\n", buf);
4633 else {
4634 ret = il_set_tx_power(il, val, false);
4635 if (ret)
4636 IL_ERR("failed setting tx power (0x%08x).\n", ret);
4637 else
4638 ret = count;
4639 }
4640 return ret;
4641}
4642
4643static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
4644 il4965_store_tx_power);
4645
4646static struct attribute *il_sysfs_entries[] = {
4647 &dev_attr_temperature.attr,
4648 &dev_attr_tx_power.attr,
4649#ifdef CONFIG_IWLEGACY_DEBUG
4650 &dev_attr_debug_level.attr,
4651#endif
4652 NULL
4653};
4654
4655static struct attribute_group il_attribute_group = {
4656 .name = NULL, /* put in device directory */
4657 .attrs = il_sysfs_entries,
4658};
4659
4660/******************************************************************************
4661 *
4662 * uCode download functions
4663 *
4664 ******************************************************************************/
4665
4666static void
4667il4965_dealloc_ucode_pci(struct il_priv *il)
4668{
4669 il_free_fw_desc(il->pci_dev, &il->ucode_code);
4670 il_free_fw_desc(il->pci_dev, &il->ucode_data);
4671 il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
4672 il_free_fw_desc(il->pci_dev, &il->ucode_init);
4673 il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
4674 il_free_fw_desc(il->pci_dev, &il->ucode_boot);
4675}
4676
4677static void
4678il4965_nic_start(struct il_priv *il)
4679{
4680 /* Remove all resets to allow NIC to operate */
4681 _il_wr(il, CSR_RESET, 0);
4682}
4683
4684static void il4965_ucode_callback(const struct firmware *ucode_raw,
4685 void *context);
4686static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
4687
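/*
 * Request firmware asynchronously.  The first attempt uses the highest
 * supported API version (ucode_api_max) as the numeric tag in the
 * firmware name; each retry from the completion callback steps the
 * version down until ucode_api_min, after which we give up with
 * -ENOENT.
 */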
4688static int __must_check
4689il4965_request_firmware(struct il_priv *il, bool first)
4690{
4691 const char *name_pre = il->cfg->fw_name_pre;
4692 char tag[8];
4693
4694 if (first) {
4695 il->fw_idx = il->cfg->ucode_api_max;
4696 sprintf(tag, "%d", il->fw_idx);
4697 } else {
4698 il->fw_idx--;
4699 sprintf(tag, "%d", il->fw_idx);
4700 }
4701
4702 if (il->fw_idx < il->cfg->ucode_api_min) {
4703 IL_ERR("no suitable firmware found!\n");
4704 return -ENOENT;
4705 }
4706
4707 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
4708
4709 D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
4710
4711 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4712 &il->pci_dev->dev, GFP_KERNEL, il,
4713 il4965_ucode_callback);
4714}
4715
4716struct il4965_firmware_pieces {
4717 const void *inst, *data, *init, *init_data, *boot;
4718 size_t inst_size, data_size, init_size, init_data_size, boot_size;
4719};
4720
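/*
 * Parse the legacy uCode file header (API versions 0, 1 and 2 share
 * the same 24-byte layout): read the five section sizes, verify that
 * they add up to the file size, and record pointers to the runtime
 * inst, runtime data, init inst, init data and boot images that follow
 * the header in that order.
 */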
4721static int
4722il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
4723 struct il4965_firmware_pieces *pieces)
4724{
4725 struct il_ucode_header *ucode = (void *)ucode_raw->data;
4726 u32 api_ver, hdr_size;
4727 const u8 *src;
4728
4729 il->ucode_ver = le32_to_cpu(ucode->ver);
4730 api_ver = IL_UCODE_API(il->ucode_ver);
4731
4732 switch (api_ver) {
4733 default:
4734 case 0:
4735 case 1:
4736 case 2:
4737 hdr_size = 24;
4738 if (ucode_raw->size < hdr_size) {
4739 IL_ERR("File size too small!\n");
4740 return -EINVAL;
4741 }
4742 pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
4743 pieces->data_size = le32_to_cpu(ucode->v1.data_size);
4744 pieces->init_size = le32_to_cpu(ucode->v1.init_size);
4745 pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
4746 pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
4747 src = ucode->v1.data;
4748 break;
4749 }
4750
4751 /* Verify size of file vs. image size info in file's header */
4752 if (ucode_raw->size !=
4753 hdr_size + pieces->inst_size + pieces->data_size +
4754 pieces->init_size + pieces->init_data_size + pieces->boot_size) {
4755
4756 IL_ERR("uCode file size %d does not match expected size\n",
4757 (int)ucode_raw->size);
4758 return -EINVAL;
4759 }
4760
4761 pieces->inst = src;
4762 src += pieces->inst_size;
4763 pieces->data = src;
4764 src += pieces->data_size;
4765 pieces->init = src;
4766 src += pieces->init_size;
4767 pieces->init_data = src;
4768 src += pieces->init_data_size;
4769 pieces->boot = src;
4770 src += pieces->boot_size;
4771
4772 return 0;
4773}
4774
4775/**
4776 * il4965_ucode_callback - callback when firmware was loaded
4777 *
4778 * If loaded successfully, copies the firmware into buffers
4779 * for the card to fetch (via DMA).
4780 */
4781static void
4782il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
4783{
4784 struct il_priv *il = context;
4785 struct il_ucode_header *ucode;
4786 int err;
4787 struct il4965_firmware_pieces pieces;
4788 const unsigned int api_max = il->cfg->ucode_api_max;
4789 const unsigned int api_min = il->cfg->ucode_api_min;
4790 u32 api_ver;
4791
4792 u32 max_probe_length = 200;
4793 u32 standard_phy_calibration_size =
4794 IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
4795
4796 memset(&pieces, 0, sizeof(pieces));
4797
4798 if (!ucode_raw) {
4799 if (il->fw_idx <= il->cfg->ucode_api_max)
4800 IL_ERR("request for firmware file '%s' failed.\n",
4801 il->firmware_name);
4802 goto try_again;
4803 }
4804
4805 D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
4806 ucode_raw->size);
4807
4808 /* Make sure that we got at least the API version number */
4809 if (ucode_raw->size < 4) {
4810 IL_ERR("File size way too small!\n");
4811 goto try_again;
4812 }
4813
4814 /* Data from ucode file: header followed by uCode images */
4815 ucode = (struct il_ucode_header *)ucode_raw->data;
4816
4817 err = il4965_load_firmware(il, ucode_raw, &pieces);
4818
4819 if (err)
4820 goto try_again;
4821
4822 api_ver = IL_UCODE_API(il->ucode_ver);
4823
4824 /*
4825 * api_ver should match the api version forming part of the
4826 * firmware filename ... but we don't check for that and only rely
4827 * on the API version read from firmware header from here on forward
4828 */
4829 if (api_ver < api_min || api_ver > api_max) {
4830 IL_ERR("Driver unable to support your firmware API. "
4831 "Driver supports v%u, firmware is v%u.\n", api_max,
4832 api_ver);
4833 goto try_again;
4834 }
4835
4836 if (api_ver != api_max)
4837 IL_ERR("Firmware has old API version. Expected v%u, "
4838 "got v%u. New firmware can be obtained "
4839 "from http://www.intellinuxwireless.org.\n", api_max,
4840 api_ver);
4841
4842 IL_INFO("loaded firmware version %u.%u.%u.%u\n",
4843 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
4844 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
4845
4846 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
4847 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
4848 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
4849 IL_UCODE_SERIAL(il->ucode_ver));
4850
4851 /*
4852 * For any of the failures below (before allocating pci memory)
4853 * we will try to load a version with a smaller API -- maybe the
4854 * user just got a corrupted version of the latest API.
4855 */
4856
4857 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
4858 D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
4859 D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
4860 D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
4861 D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
4862 D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
4863
4864 /* Verify that uCode images will fit in card's SRAM */
4865 if (pieces.inst_size > il->hw_params.max_inst_size) {
4866 IL_ERR("uCode instr len %Zd too large to fit in\n",
4867 pieces.inst_size);
4868 goto try_again;
4869 }
4870
4871 if (pieces.data_size > il->hw_params.max_data_size) {
4872 IL_ERR("uCode data len %Zd too large to fit in\n",
4873 pieces.data_size);
4874 goto try_again;
4875 }
4876
4877 if (pieces.init_size > il->hw_params.max_inst_size) {
4878 IL_ERR("uCode init instr len %Zd too large to fit in\n",
4879 pieces.init_size);
4880 goto try_again;
4881 }
4882
4883 if (pieces.init_data_size > il->hw_params.max_data_size) {
4884 IL_ERR("uCode init data len %Zd too large to fit in\n",
4885 pieces.init_data_size);
4886 goto try_again;
4887 }
4888
4889 if (pieces.boot_size > il->hw_params.max_bsm_size) {
4890 IL_ERR("uCode boot instr len %Zd too large to fit in\n",
4891 pieces.boot_size);
4892 goto try_again;
4893 }
4894
4895 /* Allocate ucode buffers for card's bus-master loading ... */
4896
4897 /* Runtime instructions and 2 copies of data:
4898 * 1) unmodified from disk
4899 * 2) backup cache for save/restore during power-downs */
4900 il->ucode_code.len = pieces.inst_size;
4901 il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
4902
4903 il->ucode_data.len = pieces.data_size;
4904 il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
4905
4906 il->ucode_data_backup.len = pieces.data_size;
4907 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
4908
4909 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
4910 !il->ucode_data_backup.v_addr)
4911 goto err_pci_alloc;
4912
4913 /* Initialization instructions and data */
4914 if (pieces.init_size && pieces.init_data_size) {
4915 il->ucode_init.len = pieces.init_size;
4916 il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
4917
4918 il->ucode_init_data.len = pieces.init_data_size;
4919 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
4920
4921 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
4922 goto err_pci_alloc;
4923 }
4924
4925 /* Bootstrap (instructions only, no data) */
4926 if (pieces.boot_size) {
4927 il->ucode_boot.len = pieces.boot_size;
4928 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
4929
4930 if (!il->ucode_boot.v_addr)
4931 goto err_pci_alloc;
4932 }
4933
4934 /* Now that we can no longer fail, copy information */
4935
4936 il->sta_key_max_num = STA_KEY_MAX_NUM;
4937
4938 /* Copy images into buffers for card's bus-master reads ... */
4939
4940 /* Runtime instructions (first block of data in file) */
4941 D_INFO("Copying (but not loading) uCode instr len %Zd\n",
4942 pieces.inst_size);
4943 memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
4944
4945 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
4946 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
4947
4948 /*
4949 * Runtime data
4950 * NOTE: Copy into backup buffer will be done in il_up()
4951 */
4952 D_INFO("Copying (but not loading) uCode data len %Zd\n",
4953 pieces.data_size);
4954 memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
4955 memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
4956
4957 /* Initialization instructions */
4958 if (pieces.init_size) {
4959 D_INFO("Copying (but not loading) init instr len %Zd\n",
4960 pieces.init_size);
4961 memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
4962 }
4963
4964 /* Initialization data */
4965 if (pieces.init_data_size) {
4966 D_INFO("Copying (but not loading) init data len %Zd\n",
4967 pieces.init_data_size);
4968 memcpy(il->ucode_init_data.v_addr, pieces.init_data,
4969 pieces.init_data_size);
4970 }
4971
4972 /* Bootstrap instructions */
4973 D_INFO("Copying (but not loading) boot instr len %Zd\n",
4974 pieces.boot_size);
4975 memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
4976
4977	/*
4978	 * Figure out the offset of the chain noise reset and gain commands
4979	 * based on the size of the standard phy calibration commands table
4980	 */
4981 il->_4965.phy_calib_chain_noise_reset_cmd =
4982 standard_phy_calibration_size;
4983 il->_4965.phy_calib_chain_noise_gain_cmd =
4984 standard_phy_calibration_size + 1;
4985
4986 /**************************************************
4987 * This is still part of probe() in a sense...
4988 *
4989 * 9. Setup and register with mac80211 and debugfs
4990 **************************************************/
4991 err = il4965_mac_setup_register(il, max_probe_length);
4992 if (err)
4993 goto out_unbind;
4994
4995 err = il_dbgfs_register(il, DRV_NAME);
4996 if (err)
4997 IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
4998 err);
4999
5000 err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
5001 if (err) {
5002 IL_ERR("failed to create sysfs device attributes\n");
5003 goto out_unbind;
5004 }
5005
5006	/* We have our copies now, allow the OS to release its copies */
5007 release_firmware(ucode_raw);
5008 complete(&il->_4965.firmware_loading_complete);
5009 return;
5010
5011try_again:
5012 /* try next, if any */
5013 if (il4965_request_firmware(il, false))
5014 goto out_unbind;
5015 release_firmware(ucode_raw);
5016 return;
5017
5018err_pci_alloc:
5019 IL_ERR("failed to allocate pci memory\n");
5020 il4965_dealloc_ucode_pci(il);
5021out_unbind:
5022 complete(&il->_4965.firmware_loading_complete);
5023 device_release_driver(&il->pci_dev->dev);
5024 release_firmware(ucode_raw);
5025}
5026
5027static const char *const desc_lookup_text[] = {
5028 "OK",
5029 "FAIL",
5030 "BAD_PARAM",
5031 "BAD_CHECKSUM",
5032 "NMI_INTERRUPT_WDG",
5033 "SYSASSERT",
5034 "FATAL_ERROR",
5035 "BAD_COMMAND",
5036 "HW_ERROR_TUNE_LOCK",
5037 "HW_ERROR_TEMPERATURE",
5038 "ILLEGAL_CHAN_FREQ",
5039 "VCC_NOT_STBL",
5040 "FH49_ERROR",
5041 "NMI_INTERRUPT_HOST",
5042 "NMI_INTERRUPT_ACTION_PT",
5043 "NMI_INTERRUPT_UNKNOWN",
5044 "UCODE_VERSION_MISMATCH",
5045 "HW_ERROR_ABS_LOCK",
5046 "HW_ERROR_CAL_LOCK_FAIL",
5047 "NMI_INTERRUPT_INST_ACTION_PT",
5048 "NMI_INTERRUPT_DATA_ACTION_PT",
5049 "NMI_TRM_HW_ER",
5050 "NMI_INTERRUPT_TRM",
5051 "NMI_INTERRUPT_BREAK_POINT",
5052 "DEBUG_0",
5053 "DEBUG_1",
5054 "DEBUG_2",
5055 "DEBUG_3",
5056};
5057
5058static struct {
5059 char *name;
5060 u8 num;
5061} advanced_lookup[] = {
5062	{"NMI_INTERRUPT_WDG", 0x34},
5063	{"SYSASSERT", 0x35},
5064	{"UCODE_VERSION_MISMATCH", 0x37},
5065	{"BAD_COMMAND", 0x38},
5066	{"NMI_INTERRUPT_DATA_ACTION_PT", 0x3C},
5067	{"FATAL_ERROR", 0x3D},
5068	{"NMI_TRM_HW_ERR", 0x46},
5069	{"NMI_INTERRUPT_TRM", 0x4C},
5070	{"NMI_INTERRUPT_BREAK_POINT", 0x54},
5071	{"NMI_INTERRUPT_WDG_RXF_FULL", 0x5C},
5072	{"NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64},
5073	{"NMI_INTERRUPT_HOST", 0x66},
5074	{"NMI_INTERRUPT_ACTION_PT", 0x7C},
5075	{"NMI_INTERRUPT_UNKNOWN", 0x84},
5076	{"NMI_INTERRUPT_INST_ACTION_PT", 0x86},
5077	{"ADVANCED_SYSASSERT", 0},
5078};
5079
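/*
 * Translate a uCode error code into a printable name: small codes
 * index desc_lookup_text directly, anything else is searched in
 * advanced_lookup, falling back to its final "ADVANCED_SYSASSERT"
 * entry when no match is found.
 */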
5080static const char *
5081il4965_desc_lookup(u32 num)
5082{
5083 int i;
5084 int max = ARRAY_SIZE(desc_lookup_text);
5085
5086 if (num < max)
5087 return desc_lookup_text[num];
5088
5089 max = ARRAY_SIZE(advanced_lookup) - 1;
5090 for (i = 0; i < max; i++) {
5091 if (advanced_lookup[i].num == num)
5092 break;
5093 }
5094 return advanced_lookup[i].name;
5095}
5096
5097#define ERROR_START_OFFSET (1 * sizeof(u32))
5098#define ERROR_ELEM_SIZE (7 * sizeof(u32))
5099
5100void
5101il4965_dump_nic_error_log(struct il_priv *il)
5102{
5103 u32 data2, line;
5104 u32 desc, time, count, base, data1;
5105 u32 blink1, blink2, ilink1, ilink2;
5106 u32 pc, hcmd;
5107
5108 if (il->ucode_type == UCODE_INIT)
5109 base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
5110 else
5111 base = le32_to_cpu(il->card_alive.error_event_table_ptr);
5112
5113 if (!il->ops->is_valid_rtc_data_addr(base)) {
5114 IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
5115 base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
5116 return;
5117 }
5118
5119 count = il_read_targ_mem(il, base);
5120
5121 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
5122 IL_ERR("Start IWL Error Log Dump:\n");
5123 IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
5124 }
5125
5126 desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
5127 il->isr_stats.err_code = desc;
5128 pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
5129 blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
5130 blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
5131 ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
5132 ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
5133 data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
5134 data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
5135 line = il_read_targ_mem(il, base + 9 * sizeof(u32));
5136 time = il_read_targ_mem(il, base + 11 * sizeof(u32));
5137 hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
5138
5139 IL_ERR("Desc Time "
5140 "data1 data2 line\n");
5141 IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
5142 il4965_desc_lookup(desc), desc, time, data1, data2, line);
5143 IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
5144 IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
5145 blink2, ilink1, ilink2, hcmd);
5146}
5147
5148static void
5149il4965_rf_kill_ct_config(struct il_priv *il)
5150{
5151 struct il_ct_kill_config cmd;
5152 unsigned long flags;
5153 int ret = 0;
5154
5155 spin_lock_irqsave(&il->lock, flags);
5156 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
5157 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
5158 spin_unlock_irqrestore(&il->lock, flags);
5159
5160 cmd.critical_temperature_R =
5161 cpu_to_le32(il->hw_params.ct_kill_threshold);
5162
5163 ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
5164 if (ret)
5165 IL_ERR("C_CT_KILL_CONFIG failed\n");
5166 else
5167 D_INFO("C_CT_KILL_CONFIG " "succeeded, "
5168 "critical temperature is %d\n",
5169 il->hw_params.ct_kill_threshold);
5170}
5171
5172static const s8 default_queue_to_tx_fifo[] = {
5173 IL_TX_FIFO_VO,
5174 IL_TX_FIFO_VI,
5175 IL_TX_FIFO_BE,
5176 IL_TX_FIFO_BK,
5177 IL49_CMD_FIFO_NUM,
5178 IL_TX_FIFO_UNUSED,
5179 IL_TX_FIFO_UNUSED,
5180};
5181
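/* Build a mask with bits lo..hi (inclusive) set. */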
5182#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
5183
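/*
 * Called once the runtime uCode reports alive: clear the Tx scheduler
 * context area in SRAM, point the scheduler at the byte-count tables,
 * enable the Tx DMA channels, set the window size and frame limit for
 * every Tx queue, and map each queue to its Tx FIFO.
 */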
5184static int
5185il4965_alive_notify(struct il_priv *il)
5186{
5187 u32 a;
5188 unsigned long flags;
5189 int i, chan;
5190 u32 reg_val;
5191
5192 spin_lock_irqsave(&il->lock, flags);
5193
5194 /* Clear 4965's internal Tx Scheduler data base */
5195 il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
5196 a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
5197 for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
5198 il_write_targ_mem(il, a, 0);
5199 for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
5200 il_write_targ_mem(il, a, 0);
5201 for (;
5202 a <
5203 il->scd_base_addr +
5204 IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
5205 a += 4)
5206 il_write_targ_mem(il, a, 0);
5207
5208	/* Tell 4965 where to find Tx byte count tables */
5209 il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);
5210
5211 /* Enable DMA channel */
5212 for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
5213 il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
5214 FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5215 FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
5216
5217 /* Update FH chicken bits */
5218 reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
5219 il_wr(il, FH49_TX_CHICKEN_BITS_REG,
5220 reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
5221
5222 /* Disable chain mode for all queues */
5223 il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
5224
5225 /* Initialize each Tx queue (including the command queue) */
5226 for (i = 0; i < il->hw_params.max_txq_num; i++) {
5227
5228 /* TFD circular buffer read/write idxes */
5229 il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
5230 il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
5231
5232 /* Max Tx Window size for Scheduler-ACK mode */
5233 il_write_targ_mem(il,
5234 il->scd_base_addr +
5235 IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
5236 (SCD_WIN_SIZE <<
5237 IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
5238 IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
5239
5240 /* Frame limit */
5241 il_write_targ_mem(il,
5242 il->scd_base_addr +
5243 IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
5244 sizeof(u32),
5245 (SCD_FRAME_LIMIT <<
5246 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
5247 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
5248
5249 }
5250 il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
5251 (1 << il->hw_params.max_txq_num) - 1);
5252
5253 /* Activate all Tx DMA/FIFO channels */
5254 il4965_txq_set_sched(il, IL_MASK(0, 6));
5255
5256 il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
5257
5258	/* make sure all queues are not stopped */
5259 memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
5260 for (i = 0; i < 4; i++)
5261 atomic_set(&il->queue_stop_count[i], 0);
5262
5263	/* reset to 0 to enable all queues first */
5264 il->txq_ctx_active_msk = 0;
5265 /* Map each Tx/cmd queue to its corresponding fifo */
5266 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
5267
5268 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
5269 int ac = default_queue_to_tx_fifo[i];
5270
5271 il_txq_ctx_activate(il, i);
5272
5273 if (ac == IL_TX_FIFO_UNUSED)
5274 continue;
5275
5276 il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
5277 }
5278
5279 spin_unlock_irqrestore(&il->lock, flags);
5280
5281 return 0;
5282}
5283
5284/**
5285 * il4965_alive_start - called after N_ALIVE notification received
5286 * from protocol/runtime uCode (initialization uCode's
5287 * Alive gets handled by il_init_alive_start()).
5288 */
5289static void
5290il4965_alive_start(struct il_priv *il)
5291{
5292 int ret = 0;
5293
5294 D_INFO("Runtime Alive received.\n");
5295
5296 if (il->card_alive.is_valid != UCODE_VALID_OK) {
5297 /* We had an error bringing up the hardware, so take it
5298 * all the way back down so we can try again */
5299 D_INFO("Alive failed.\n");
5300 goto restart;
5301 }
5302
5303	/* The initialization uCode has loaded the runtime uCode ... verify the inst image.
5304	 * This is a paranoid check, because we would not have gotten the
5305	 * "runtime" alive if the code weren't properly loaded. */
5306 if (il4965_verify_ucode(il)) {
5307 /* Runtime instruction load was bad;
5308 * take it all the way back down so we can try again */
5309 D_INFO("Bad runtime uCode load.\n");
5310 goto restart;
5311 }
5312
5313 ret = il4965_alive_notify(il);
5314 if (ret) {
5315 IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
5316 goto restart;
5317 }
5318
5319 /* After the ALIVE response, we can send host commands to the uCode */
5320 set_bit(S_ALIVE, &il->status);
5321
5322 /* Enable watchdog to monitor the driver tx queues */
5323 il_setup_watchdog(il);
5324
5325 if (il_is_rfkill(il))
5326 return;
5327
5328 ieee80211_wake_queues(il->hw);
5329
5330 il->active_rate = RATES_MASK;
5331
5332 il_power_update_mode(il, true);
5333 D_INFO("Updated power mode\n");
5334
5335 if (il_is_associated(il)) {
5336 struct il_rxon_cmd *active_rxon =
5337 (struct il_rxon_cmd *)&il->active;
5338 /* apply any changes in staging */
5339 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
5340 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5341 } else {
5342 /* Initialize our rx_config data */
5343 il_connection_init_rx_config(il);
5344
5345 if (il->ops->set_rxon_chain)
5346 il->ops->set_rxon_chain(il);
5347 }
5348
5349 /* Configure bluetooth coexistence if enabled */
5350 il_send_bt_config(il);
5351
5352 il4965_reset_run_time_calib(il);
5353
5354 set_bit(S_READY, &il->status);
5355
5356 /* Configure the adapter for unassociated operation */
5357 il_commit_rxon(il);
5358
5359 /* At this point, the NIC is initialized and operational */
5360 il4965_rf_kill_ct_config(il);
5361
5362 D_INFO("ALIVE processing complete.\n");
5363 wake_up(&il->wait_command_queue);
5364
5365 return;
5366
5367restart:
5368 queue_work(il->workqueue, &il->restart);
5369}
5370
5371static void il4965_cancel_deferred_work(struct il_priv *il);
5372
5373static void
5374__il4965_down(struct il_priv *il)
5375{
5376 unsigned long flags;
5377 int exit_pending;
5378
5379 D_INFO(DRV_NAME " is going down\n");
5380
5381 il_scan_cancel_timeout(il, 200);
5382
5383 exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
5384
5385 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
5386 * to prevent rearm timer */
5387 del_timer_sync(&il->watchdog);
5388
5389 il_clear_ucode_stations(il);
5390
5391 /* FIXME: race conditions ? */
5392 spin_lock_irq(&il->sta_lock);
5393 /*
5394 * Remove all key information that is not stored as part
5395 * of station information since mac80211 may not have had
5396 * a chance to remove all the keys. When device is
5397 * reconfigured by mac80211 after an error all keys will
5398 * be reconfigured.
5399 */
5400 memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
5401 il->_4965.key_mapping_keys = 0;
5402 spin_unlock_irq(&il->sta_lock);
5403
5404 il_dealloc_bcast_stations(il);
5405 il_clear_driver_stations(il);
5406
5407 /* Unblock any waiting calls */
5408 wake_up_all(&il->wait_command_queue);
5409
5410 /* Wipe out the EXIT_PENDING status bit if we are not actually
5411 * exiting the module */
5412 if (!exit_pending)
5413 clear_bit(S_EXIT_PENDING, &il->status);
5414
5415 /* stop and reset the on-board processor */
5416 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
5417
5418 /* tell the device to stop sending interrupts */
5419 spin_lock_irqsave(&il->lock, flags);
5420 il_disable_interrupts(il);
5421 spin_unlock_irqrestore(&il->lock, flags);
5422 il4965_synchronize_irq(il);
5423
5424 if (il->mac80211_registered)
5425 ieee80211_stop_queues(il->hw);
5426
5427 /* If we have not previously called il_init() then
5428 * clear all bits but the RF Kill bit and return */
5429 if (!il_is_init(il)) {
5430 il->status =
5431 test_bit(S_RFKILL, &il->status) << S_RFKILL |
5432 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
5433 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
5434 goto exit;
5435 }
5436
5437 /* ...otherwise clear out all the status bits but the RF Kill
5438 * bit and continue taking the NIC down. */
5439 il->status &=
5440 test_bit(S_RFKILL, &il->status) << S_RFKILL |
5441 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
5442 test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
5443 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
5444
5445	/*
5446	 * Interrupts are disabled and synchronized and il->mutex is held, so
5447	 * this is the only thread that will program device registers, but
5448	 * lockdep assertions still apply, so we take reg_lock.
5449	 */
5450 spin_lock_irq(&il->reg_lock);
5451 /* FIXME: il_grab_nic_access if rfkill is off ? */
5452
5453 il4965_txq_ctx_stop(il);
5454 il4965_rxq_stop(il);
5455 /* Power-down device's busmaster DMA clocks */
5456 _il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
5457 udelay(5);
5458 /* Make sure (redundant) we've released our request to stay awake */
5459 _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5460 /* Stop the device, and put it in low power state */
5461 _il_apm_stop(il);
5462
5463 spin_unlock_irq(&il->reg_lock);
5464
5465 il4965_txq_ctx_unmap(il);
5466exit:
5467 memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
5468
5469 dev_kfree_skb(il->beacon_skb);
5470 il->beacon_skb = NULL;
5471
5472 /* clear out any free frames */
5473 il4965_clear_free_frames(il);
5474}
5475
5476static void
5477il4965_down(struct il_priv *il)
5478{
5479 mutex_lock(&il->mutex);
5480 __il4965_down(il);
5481 mutex_unlock(&il->mutex);
5482
5483 il4965_cancel_deferred_work(il);
5484}
5485
5486
5487static void
5488il4965_set_hw_ready(struct il_priv *il)
5489{
5490 int ret;
5491
5492 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
5493 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
5494
5495 /* See if we got it */
5496 ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5497 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5498 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5499 100);
5500 if (ret >= 0)
5501 il->hw_ready = true;
5502
5503 D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
5504}
5505
5506static void
5507il4965_prepare_card_hw(struct il_priv *il)
5508{
5509 int ret;
5510
5511 il->hw_ready = false;
5512
5513 il4965_set_hw_ready(il);
5514 if (il->hw_ready)
5515 return;
5516
5517 /* If HW is not ready, prepare the conditions to check again */
5518 il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
5519
5520 ret =
5521 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5522 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
5523 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
5524
5525 /* HW should be ready by now, check again. */
5526 if (ret != -ETIMEDOUT)
5527 il4965_set_hw_ready(il);
5528}
5529
5530#define MAX_HW_RESTARTS 5
5531
5532static int
5533__il4965_up(struct il_priv *il)
5534{
5535 int i;
5536 int ret;
5537
5538 if (test_bit(S_EXIT_PENDING, &il->status)) {
5539 IL_WARN("Exit pending; will not bring the NIC up\n");
5540 return -EIO;
5541 }
5542
5543 if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
5544 IL_ERR("ucode not available for device bringup\n");
5545 return -EIO;
5546 }
5547
5548 ret = il4965_alloc_bcast_station(il);
5549 if (ret) {
5550 il_dealloc_bcast_stations(il);
5551 return ret;
5552 }
5553
5554 il4965_prepare_card_hw(il);
5555 if (!il->hw_ready) {
5556 IL_ERR("HW not ready\n");
5557 return -EIO;
5558 }
5559
5560 /* If platform's RF_KILL switch is NOT set to KILL */
5561 if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5562 clear_bit(S_RFKILL, &il->status);
5563 else {
5564 set_bit(S_RFKILL, &il->status);
5565 wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
5566
5567 il_enable_rfkill_int(il);
5568 IL_WARN("Radio disabled by HW RF Kill switch\n");
5569 return 0;
5570 }
5571
5572 _il_wr(il, CSR_INT, 0xFFFFFFFF);
5573
5574 /* must be initialised before il_hw_nic_init */
5575 il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
5576
5577 ret = il4965_hw_nic_init(il);
5578 if (ret) {
5579 IL_ERR("Unable to init nic\n");
5580 return ret;
5581 }
5582
5583 /* make sure rfkill handshake bits are cleared */
5584 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5585 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5586
5587 /* clear (again), then enable host interrupts */
5588 _il_wr(il, CSR_INT, 0xFFFFFFFF);
5589 il_enable_interrupts(il);
5590
5591 /* really make sure rfkill handshake bits are cleared */
5592 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5593 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5594
5595 /* Copy original ucode data image from disk into backup cache.
5596 * This will be used to initialize the on-board processor's
5597 * data SRAM for a clean start when the runtime program first loads. */
5598 memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
5599 il->ucode_data.len);
5600
5601 for (i = 0; i < MAX_HW_RESTARTS; i++) {
5602
5603 /* load bootstrap state machine,
5604 * load bootstrap program into processor's memory,
5605 * prepare to load the "initialize" uCode */
5606 ret = il->ops->load_ucode(il);
5607
5608 if (ret) {
5609 IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
5610 continue;
5611 }
5612
5613 /* start card; "initialize" will load runtime ucode */
5614 il4965_nic_start(il);
5615
5616 D_INFO(DRV_NAME " is coming up\n");
5617
5618 return 0;
5619 }
5620
5621 set_bit(S_EXIT_PENDING, &il->status);
5622 __il4965_down(il);
5623 clear_bit(S_EXIT_PENDING, &il->status);
5624
5625	 /* tried to restart and configure the device for as long as our
5626	 * patience could withstand */
5627 IL_ERR("Unable to initialize device after %d attempts.\n", i);
5628 return -EIO;
5629}
5630
5631/*****************************************************************************
5632 *
5633 * Workqueue callbacks
5634 *
5635 *****************************************************************************/
5636
5637static void
5638il4965_bg_init_alive_start(struct work_struct *data)
5639{
5640 struct il_priv *il =
5641 container_of(data, struct il_priv, init_alive_start.work);
5642
5643 mutex_lock(&il->mutex);
5644 if (test_bit(S_EXIT_PENDING, &il->status))
5645 goto out;
5646
5647 il->ops->init_alive_start(il);
5648out:
5649 mutex_unlock(&il->mutex);
5650}
5651
5652static void
5653il4965_bg_alive_start(struct work_struct *data)
5654{
5655 struct il_priv *il =
5656 container_of(data, struct il_priv, alive_start.work);
5657
5658 mutex_lock(&il->mutex);
5659 if (test_bit(S_EXIT_PENDING, &il->status))
5660 goto out;
5661
5662 il4965_alive_start(il);
5663out:
5664 mutex_unlock(&il->mutex);
5665}
5666
5667static void
5668il4965_bg_run_time_calib_work(struct work_struct *work)
5669{
5670 struct il_priv *il = container_of(work, struct il_priv,
5671 run_time_calib_work);
5672
5673 mutex_lock(&il->mutex);
5674
5675 if (test_bit(S_EXIT_PENDING, &il->status) ||
5676 test_bit(S_SCANNING, &il->status)) {
5677 mutex_unlock(&il->mutex);
5678 return;
5679 }
5680
5681 if (il->start_calib) {
5682 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
5683 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
5684 }
5685
5686 mutex_unlock(&il->mutex);
5687}
5688
5689static void
5690il4965_bg_restart(struct work_struct *data)
5691{
5692 struct il_priv *il = container_of(data, struct il_priv, restart);
5693
5694 if (test_bit(S_EXIT_PENDING, &il->status))
5695 return;
5696
5697 if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
5698 mutex_lock(&il->mutex);
5699 il->is_open = 0;
5700
5701 __il4965_down(il);
5702
5703 mutex_unlock(&il->mutex);
5704 il4965_cancel_deferred_work(il);
5705 ieee80211_restart_hw(il->hw);
5706 } else {
5707 il4965_down(il);
5708
5709 mutex_lock(&il->mutex);
5710 if (test_bit(S_EXIT_PENDING, &il->status)) {
5711 mutex_unlock(&il->mutex);
5712 return;
5713 }
5714
5715 __il4965_up(il);
5716 mutex_unlock(&il->mutex);
5717 }
5718}
5719
5720static void
5721il4965_bg_rx_replenish(struct work_struct *data)
5722{
5723 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
5724
5725 if (test_bit(S_EXIT_PENDING, &il->status))
5726 return;
5727
5728 mutex_lock(&il->mutex);
5729 il4965_rx_replenish(il);
5730 mutex_unlock(&il->mutex);
5731}
5732
5733/*****************************************************************************
5734 *
5735 * mac80211 entry point functions
5736 *
5737 *****************************************************************************/
5738
5739#define UCODE_READY_TIMEOUT (4 * HZ)
5740
5741/*
5742 * Not a mac80211 entry point function, but it fits in with all the
5743 * other mac80211 functions grouped here.
5744 */
5745static int
5746il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
5747{
5748 int ret;
5749 struct ieee80211_hw *hw = il->hw;
5750
5751 hw->rate_control_algorithm = "iwl-4965-rs";
5752
5753 /* Tell mac80211 our characteristics */
5754 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
5755 ieee80211_hw_set(hw, SUPPORTS_PS);
5756 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
5757 ieee80211_hw_set(hw, SPECTRUM_MGMT);
5758 ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
5759 ieee80211_hw_set(hw, SIGNAL_DBM);
5760 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
5761 if (il->cfg->sku & IL_SKU_N)
5762 hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
5763 NL80211_FEATURE_STATIC_SMPS;
5764
5765 hw->sta_data_size = sizeof(struct il_station_priv);
5766 hw->vif_data_size = sizeof(struct il_vif_priv);
5767
5768 hw->wiphy->interface_modes =
5769 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
5770
5771 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
5772 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
5773 REGULATORY_DISABLE_BEACON_HINTS;
5774
5775 /*
5776 * For now, disable PS by default because it affects
5777 * RX performance significantly.
5778 */
5779 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
5780
5781 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
5782 /* we create the 802.11 header and a zero-length SSID element */
5783 hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
5784
5785 /* Default value; 4 EDCA QOS priorities */
5786 hw->queues = 4;
5787
5788 hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
5789
5790 if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
5791 il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5792 &il->bands[IEEE80211_BAND_2GHZ];
5793 if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
5794 il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5795 &il->bands[IEEE80211_BAND_5GHZ];
5796
5797 il_leds_init(il);
5798
5799 ret = ieee80211_register_hw(il->hw);
5800 if (ret) {
5801 IL_ERR("Failed to register hw (error %d)\n", ret);
5802 return ret;
5803 }
5804 il->mac80211_registered = 1;
5805
5806 return 0;
5807}
5808
5809int
5810il4965_mac_start(struct ieee80211_hw *hw)
5811{
5812 struct il_priv *il = hw->priv;
5813 int ret;
5814
5815 D_MAC80211("enter\n");
5816
5817 /* we should be verifying the device is ready to be opened */
5818 mutex_lock(&il->mutex);
5819 ret = __il4965_up(il);
5820 mutex_unlock(&il->mutex);
5821
5822 if (ret)
5823 return ret;
5824
5825 if (il_is_rfkill(il))
5826 goto out;
5827
5828 D_INFO("Start UP work done.\n");
5829
5830 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
5831 * mac80211 will not be run successfully. */
5832 ret = wait_event_timeout(il->wait_command_queue,
5833 test_bit(S_READY, &il->status),
5834 UCODE_READY_TIMEOUT);
5835 if (!ret) {
5836 if (!test_bit(S_READY, &il->status)) {
5837 IL_ERR("START_ALIVE timeout after %dms.\n",
5838 jiffies_to_msecs(UCODE_READY_TIMEOUT));
5839 return -ETIMEDOUT;
5840 }
5841 }
5842
5843 il4965_led_enable(il);
5844
5845out:
5846 il->is_open = 1;
5847 D_MAC80211("leave\n");
5848 return 0;
5849}
5850
5851void
5852il4965_mac_stop(struct ieee80211_hw *hw)
5853{
5854 struct il_priv *il = hw->priv;
5855
5856 D_MAC80211("enter\n");
5857
5858 if (!il->is_open)
5859 return;
5860
5861 il->is_open = 0;
5862
5863 il4965_down(il);
5864
5865 flush_workqueue(il->workqueue);
5866
5867	 /* User space software may expect to get rfkill changes
5868	 * even if the interface is down */
5869 _il_wr(il, CSR_INT, 0xFFFFFFFF);
5870 il_enable_rfkill_int(il);
5871
5872 D_MAC80211("leave\n");
5873}
5874
5875void
5876il4965_mac_tx(struct ieee80211_hw *hw,
5877 struct ieee80211_tx_control *control,
5878 struct sk_buff *skb)
5879{
5880 struct il_priv *il = hw->priv;
5881
5882 D_MACDUMP("enter\n");
5883
5884 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
5885 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
5886
5887 if (il4965_tx_skb(il, control->sta, skb))
5888 dev_kfree_skb_any(skb);
5889
5890 D_MACDUMP("leave\n");
5891}
5892
5893void
5894il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5895 struct ieee80211_key_conf *keyconf,
5896 struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
5897{
5898 struct il_priv *il = hw->priv;
5899
5900 D_MAC80211("enter\n");
5901
5902 il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);
5903
5904 D_MAC80211("leave\n");
5905}
5906
5907int
5908il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5909 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5910 struct ieee80211_key_conf *key)
5911{
5912 struct il_priv *il = hw->priv;
5913 int ret;
5914 u8 sta_id;
5915 bool is_default_wep_key = false;
5916
5917 D_MAC80211("enter\n");
5918
5919 if (il->cfg->mod_params->sw_crypto) {
5920 D_MAC80211("leave - hwcrypto disabled\n");
5921 return -EOPNOTSUPP;
5922 }
5923
5924 /*
5925 * To support IBSS RSN, don't program group keys in IBSS, the
5926 * hardware will then not attempt to decrypt the frames.
5927 */
5928 if (vif->type == NL80211_IFTYPE_ADHOC &&
5929 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
5930 D_MAC80211("leave - ad-hoc group key\n");
5931 return -EOPNOTSUPP;
5932 }
5933
5934 sta_id = il_sta_id_or_broadcast(il, sta);
5935 if (sta_id == IL_INVALID_STATION)
5936 return -EINVAL;
5937
5938 mutex_lock(&il->mutex);
5939 il_scan_cancel_timeout(il, 100);
5940
5941 /*
5942	 * If we are getting a WEP group key and we haven't received any key mapping
5943 * so far, we are in legacy wep mode (group key only), otherwise we are
5944 * in 1X mode.
5945 * In legacy wep mode, we use another host command to the uCode.
5946 */
5947 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5948 key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
5949 if (cmd == SET_KEY)
5950 is_default_wep_key = !il->_4965.key_mapping_keys;
5951 else
5952 is_default_wep_key =
5953 (key->hw_key_idx == HW_KEY_DEFAULT);
5954 }
5955
5956 switch (cmd) {
5957 case SET_KEY:
5958 if (is_default_wep_key)
5959 ret = il4965_set_default_wep_key(il, key);
5960 else
5961 ret = il4965_set_dynamic_key(il, key, sta_id);
5962
5963 D_MAC80211("enable hwcrypto key\n");
5964 break;
5965 case DISABLE_KEY:
5966 if (is_default_wep_key)
5967 ret = il4965_remove_default_wep_key(il, key);
5968 else
5969 ret = il4965_remove_dynamic_key(il, key, sta_id);
5970
5971 D_MAC80211("disable hwcrypto key\n");
5972 break;
5973 default:
5974 ret = -EINVAL;
5975 }
5976
5977 mutex_unlock(&il->mutex);
5978 D_MAC80211("leave\n");
5979
5980 return ret;
5981}
5982
5983int
5984il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5985 enum ieee80211_ampdu_mlme_action action,
5986 struct ieee80211_sta *sta, u16 tid, u16 * ssn,
5987 u8 buf_size, bool amsdu)
5988{
5989 struct il_priv *il = hw->priv;
5990 int ret = -EINVAL;
5991
5992 D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
5993
5994 if (!(il->cfg->sku & IL_SKU_N))
5995 return -EACCES;
5996
5997 mutex_lock(&il->mutex);
5998
5999 switch (action) {
6000 case IEEE80211_AMPDU_RX_START:
6001 D_HT("start Rx\n");
6002 ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
6003 break;
6004 case IEEE80211_AMPDU_RX_STOP:
6005 D_HT("stop Rx\n");
6006 ret = il4965_sta_rx_agg_stop(il, sta, tid);
6007 if (test_bit(S_EXIT_PENDING, &il->status))
6008 ret = 0;
6009 break;
6010 case IEEE80211_AMPDU_TX_START:
6011 D_HT("start Tx\n");
6012 ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
6013 break;
6014 case IEEE80211_AMPDU_TX_STOP_CONT:
6015 case IEEE80211_AMPDU_TX_STOP_FLUSH:
6016 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
6017 D_HT("stop Tx\n");
6018 ret = il4965_tx_agg_stop(il, vif, sta, tid);
6019 if (test_bit(S_EXIT_PENDING, &il->status))
6020 ret = 0;
6021 break;
6022 case IEEE80211_AMPDU_TX_OPERATIONAL:
6023 ret = 0;
6024 break;
6025 }
6026 mutex_unlock(&il->mutex);
6027
6028 return ret;
6029}
6030
6031int
6032il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6033 struct ieee80211_sta *sta)
6034{
6035 struct il_priv *il = hw->priv;
6036 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
6037 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
6038 int ret;
6039 u8 sta_id;
6040
6041 D_INFO("received request to add station %pM\n", sta->addr);
6042 mutex_lock(&il->mutex);
6043 D_INFO("proceeding to add station %pM\n", sta->addr);
6044 sta_priv->common.sta_id = IL_INVALID_STATION;
6045
6046 atomic_set(&sta_priv->pending_frames, 0);
6047
6048 ret =
6049 il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
6050 if (ret) {
6051 IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
6052 /* Should we return success if return code is EEXIST ? */
6053 mutex_unlock(&il->mutex);
6054 return ret;
6055 }
6056
6057 sta_priv->common.sta_id = sta_id;
6058
6059 /* Initialize rate scaling */
6060 D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
6061 il4965_rs_rate_init(il, sta, sta_id);
6062 mutex_unlock(&il->mutex);
6063
6064 return 0;
6065}
6066
6067void
6068il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6069 struct ieee80211_channel_switch *ch_switch)
6070{
6071 struct il_priv *il = hw->priv;
6072 const struct il_channel_info *ch_info;
6073 struct ieee80211_conf *conf = &hw->conf;
6074 struct ieee80211_channel *channel = ch_switch->chandef.chan;
6075 struct il_ht_config *ht_conf = &il->current_ht_config;
6076 u16 ch;
6077
6078 D_MAC80211("enter\n");
6079
6080 mutex_lock(&il->mutex);
6081
6082 if (il_is_rfkill(il))
6083 goto out;
6084
6085 if (test_bit(S_EXIT_PENDING, &il->status) ||
6086 test_bit(S_SCANNING, &il->status) ||
6087 test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
6088 goto out;
6089
6090 if (!il_is_associated(il))
6091 goto out;
6092
6093 if (!il->ops->set_channel_switch)
6094 goto out;
6095
6096 ch = channel->hw_value;
6097 if (le16_to_cpu(il->active.channel) == ch)
6098 goto out;
6099
6100 ch_info = il_get_channel_info(il, channel->band, ch);
6101 if (!il_is_channel_valid(ch_info)) {
6102 D_MAC80211("invalid channel\n");
6103 goto out;
6104 }
6105
6106 spin_lock_irq(&il->lock);
6107
6108 il->current_ht_config.smps = conf->smps_mode;
6109
6110 /* Configure HT40 channels */
6111 switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
6112 case NL80211_CHAN_NO_HT:
6113 case NL80211_CHAN_HT20:
6114 il->ht.is_40mhz = false;
6115 il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
6116 break;
6117 case NL80211_CHAN_HT40MINUS:
6118 il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
6119 il->ht.is_40mhz = true;
6120 break;
6121 case NL80211_CHAN_HT40PLUS:
6122 il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
6123 il->ht.is_40mhz = true;
6124 break;
6125 }
6126
6127 if ((le16_to_cpu(il->staging.channel) != ch))
6128 il->staging.flags = 0;
6129
6130 il_set_rxon_channel(il, channel);
6131 il_set_rxon_ht(il, ht_conf);
6132 il_set_flags_for_band(il, channel->band, il->vif);
6133
6134 spin_unlock_irq(&il->lock);
6135
6136 il_set_rate(il);
6137 /*
6138 * at this point, staging_rxon has the
6139 * configuration for channel switch
6140 */
6141 set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
6142 il->switch_channel = cpu_to_le16(ch);
6143 if (il->ops->set_channel_switch(il, ch_switch)) {
6144 clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
6145 il->switch_channel = 0;
6146 ieee80211_chswitch_done(il->vif, false);
6147 }
6148
6149out:
6150 mutex_unlock(&il->mutex);
6151 D_MAC80211("leave\n");
6152}
6153
6154void
6155il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
6156 unsigned int *total_flags, u64 multicast)
6157{
6158 struct il_priv *il = hw->priv;
6159 __le32 filter_or = 0, filter_nand = 0;
6160
6161#define CHK(test, flag) do { \
6162 if (*total_flags & (test)) \
6163 filter_or |= (flag); \
6164 else \
6165 filter_nand |= (flag); \
6166 } while (0)
6167
6168 D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
6169 *total_flags);
6170
6171 CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
6172 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
6173 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
6174 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
6175
6176#undef CHK
6177
6178 mutex_lock(&il->mutex);
6179
6180 il->staging.filter_flags &= ~filter_nand;
6181 il->staging.filter_flags |= filter_or;
6182
6183 /*
6184 * Not committing directly because hardware can perform a scan,
6185 * but we'll eventually commit the filter flags change anyway.
6186 */
6187
6188 mutex_unlock(&il->mutex);
6189
6190 /*
6191 * Receiving all multicast frames is always enabled by the
6192 * default flags setup in il_connection_init_rx_config()
6193 * since we currently do not support programming multicast
6194 * filters into the device.
6195 */
6196 *total_flags &=
6197 FIF_OTHER_BSS | FIF_ALLMULTI |
6198 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
6199}
6200
6201/*****************************************************************************
6202 *
6203 * driver setup and teardown
6204 *
6205 *****************************************************************************/
6206
6207static void
6208il4965_bg_txpower_work(struct work_struct *work)
6209{
6210 struct il_priv *il = container_of(work, struct il_priv,
6211 txpower_work);
6212
6213 mutex_lock(&il->mutex);
6214
6215 /* If a scan happened to start before we got here
6216 * then just return; the stats notification will
6217 * kick off another scheduled work to compensate for
6218 * any temperature delta we missed here. */
6219 if (test_bit(S_EXIT_PENDING, &il->status) ||
6220 test_bit(S_SCANNING, &il->status))
6221 goto out;
6222
6223	 /* Regardless of whether we are associated, we must reconfigure the
6224 * TX power since frames can be sent on non-radar channels while
6225 * not associated */
6226 il->ops->send_tx_power(il);
6227
6228 /* Update last_temperature to keep is_calib_needed from running
6229 * when it isn't needed... */
6230 il->last_temperature = il->temperature;
6231out:
6232 mutex_unlock(&il->mutex);
6233}
6234
6235static void
6236il4965_setup_deferred_work(struct il_priv *il)
6237{
6238 il->workqueue = create_singlethread_workqueue(DRV_NAME);
6239
6240 init_waitqueue_head(&il->wait_command_queue);
6241
6242 INIT_WORK(&il->restart, il4965_bg_restart);
6243 INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
6244 INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
6245 INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
6246 INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
6247
6248 il_setup_scan_deferred_work(il);
6249
6250 INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
6251
6252 setup_timer(&il->stats_periodic, il4965_bg_stats_periodic,
6253 (unsigned long)il);
6254
6255 setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
6256
6257 tasklet_init(&il->irq_tasklet,
6258 (void (*)(unsigned long))il4965_irq_tasklet,
6259 (unsigned long)il);
6260}
6261
6262static void
6263il4965_cancel_deferred_work(struct il_priv *il)
6264{
6265 cancel_work_sync(&il->txpower_work);
6266 cancel_delayed_work_sync(&il->init_alive_start);
6267 cancel_delayed_work(&il->alive_start);
6268 cancel_work_sync(&il->run_time_calib_work);
6269
6270 il_cancel_scan_deferred_work(il);
6271
6272 del_timer_sync(&il->stats_periodic);
6273}
6274
6275static void
6276il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
6277{
6278 int i;
6279
6280 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
6281 rates[i].bitrate = il_rates[i].ieee * 5;
6282 rates[i].hw_value = i; /* Rate scaling will work on idxes */
6283 rates[i].hw_value_short = i;
6284 rates[i].flags = 0;
6285 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
6286 /*
6287 * If CCK != 1M then set short preamble rate flag.
6288 */
6289 rates[i].flags |=
6290 (il_rates[i].plcp ==
6291 RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
6292 }
6293 }
6294}
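/*
 * Editorial note (not part of the original source): il_rates[i].ieee
 * carries the 802.11 rate value in 500 kb/s units, while mac80211's
 * ieee80211_rate.bitrate is in 100 kb/s units -- hence the "* 5"
 * above. For example, 11 Mb/s is encoded as ieee = 22 and exported
 * to mac80211 as bitrate = 110.
 */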
6295
6296/*
6297 * Acquire il->lock before calling this function!
6298 */
6299void
6300il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
6301{
6302 il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
6303 il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
6304}
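/*
 * Editorial example of the encoding above (illustrative values only):
 * for txq_id = 4 and idx = 0x2a, the write-pointer register is written
 * with (0x2a & 0xff) | (4 << 8) == 0x042a, i.e. the queue number in the
 * high byte and the 8-bit index in the low byte.
 */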
6305
6306void
6307il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
6308 int tx_fifo_id, int scd_retry)
6309{
6310 int txq_id = txq->q.id;
6311
6312 /* Find out whether to activate Tx queue */
6313 int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
6314
6315 /* Set up and activate */
6316 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
6317 (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
6318 (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
6319 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
6320 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
6321 IL49_SCD_QUEUE_STTS_REG_MSK);
6322
6323 txq->sched_retry = scd_retry;
6324
6325 D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
6326 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
6327}
6328
6329static const struct ieee80211_ops il4965_mac_ops = {
6330 .tx = il4965_mac_tx,
6331 .start = il4965_mac_start,
6332 .stop = il4965_mac_stop,
6333 .add_interface = il_mac_add_interface,
6334 .remove_interface = il_mac_remove_interface,
6335 .change_interface = il_mac_change_interface,
6336 .config = il_mac_config,
6337 .configure_filter = il4965_configure_filter,
6338 .set_key = il4965_mac_set_key,
6339 .update_tkip_key = il4965_mac_update_tkip_key,
6340 .conf_tx = il_mac_conf_tx,
6341 .reset_tsf = il_mac_reset_tsf,
6342 .bss_info_changed = il_mac_bss_info_changed,
6343 .ampdu_action = il4965_mac_ampdu_action,
6344 .hw_scan = il_mac_hw_scan,
6345 .sta_add = il4965_mac_sta_add,
6346 .sta_remove = il_mac_sta_remove,
6347 .channel_switch = il4965_mac_channel_switch,
6348 .tx_last_beacon = il_mac_tx_last_beacon,
6349 .flush = il_mac_flush,
6350};
6351
6352static int
6353il4965_init_drv(struct il_priv *il)
6354{
6355 int ret;
6356
6357 spin_lock_init(&il->sta_lock);
6358 spin_lock_init(&il->hcmd_lock);
6359
6360 INIT_LIST_HEAD(&il->free_frames);
6361
6362 mutex_init(&il->mutex);
6363
6364 il->ieee_channels = NULL;
6365 il->ieee_rates = NULL;
6366 il->band = IEEE80211_BAND_2GHZ;
6367
6368 il->iw_mode = NL80211_IFTYPE_STATION;
6369 il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
6370 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
6371
6372 /* initialize force reset */
6373 il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
6374
6375 /* Choose which receivers/antennas to use */
6376 if (il->ops->set_rxon_chain)
6377 il->ops->set_rxon_chain(il);
6378
6379 il_init_scan_params(il);
6380
6381 ret = il_init_channel_map(il);
6382 if (ret) {
6383 IL_ERR("initializing regulatory failed: %d\n", ret);
6384 goto err;
6385 }
6386
6387 ret = il_init_geos(il);
6388 if (ret) {
6389 IL_ERR("initializing geos failed: %d\n", ret);
6390 goto err_free_channel_map;
6391 }
6392 il4965_init_hw_rates(il, il->ieee_rates);
6393
6394 return 0;
6395
6396err_free_channel_map:
6397 il_free_channel_map(il);
6398err:
6399 return ret;
6400}
6401
6402static void
6403il4965_uninit_drv(struct il_priv *il)
6404{
6405 il_free_geos(il);
6406 il_free_channel_map(il);
6407 kfree(il->scan_cmd);
6408}
6409
6410static void
6411il4965_hw_detect(struct il_priv *il)
6412{
6413 il->hw_rev = _il_rd(il, CSR_HW_REV);
6414 il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
6415 il->rev_id = il->pci_dev->revision;
6416 D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
6417}
6418
6419static struct il_sensitivity_ranges il4965_sensitivity = {
6420 .min_nrg_cck = 97,
6421 .max_nrg_cck = 0, /* not used, set to 0 */
6422
6423 .auto_corr_min_ofdm = 85,
6424 .auto_corr_min_ofdm_mrc = 170,
6425 .auto_corr_min_ofdm_x1 = 105,
6426 .auto_corr_min_ofdm_mrc_x1 = 220,
6427
6428 .auto_corr_max_ofdm = 120,
6429 .auto_corr_max_ofdm_mrc = 210,
6430 .auto_corr_max_ofdm_x1 = 140,
6431 .auto_corr_max_ofdm_mrc_x1 = 270,
6432
6433 .auto_corr_min_cck = 125,
6434 .auto_corr_max_cck = 200,
6435 .auto_corr_min_cck_mrc = 200,
6436 .auto_corr_max_cck_mrc = 400,
6437
6438 .nrg_th_cck = 100,
6439 .nrg_th_ofdm = 100,
6440
6441 .barker_corr_th_min = 190,
6442 .barker_corr_th_min_mrc = 390,
6443 .nrg_th_cca = 62,
6444};
6445
6446static void
6447il4965_set_hw_params(struct il_priv *il)
6448{
6449 il->hw_params.bcast_id = IL4965_BROADCAST_ID;
6450 il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
6451 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
6452 if (il->cfg->mod_params->amsdu_size_8K)
6453 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
6454 else
6455 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
6456
6457 il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
6458
6459 if (il->cfg->mod_params->disable_11n)
6460 il->cfg->sku &= ~IL_SKU_N;
6461
6462 if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
6463 il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
6464 il->cfg->num_of_queues =
6465 il->cfg->mod_params->num_of_queues;
6466
6467 il->hw_params.max_txq_num = il->cfg->num_of_queues;
6468 il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
6469 il->hw_params.scd_bc_tbls_size =
6470 il->cfg->num_of_queues *
6471 sizeof(struct il4965_scd_bc_tbl);
6472
6473 il->hw_params.tfd_size = sizeof(struct il_tfd);
6474 il->hw_params.max_stations = IL4965_STATION_COUNT;
6475 il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
6476 il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
6477 il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
6478 il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
6479
6480 il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
6481
6482 il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
6483 il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
6484 il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
6485 il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
6486
6487 il->hw_params.ct_kill_threshold =
6488 CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
6489
6490 il->hw_params.sens = &il4965_sensitivity;
6491 il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
6492}
6493
6494static int
6495il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6496{
6497 int err = 0;
6498 struct il_priv *il;
6499 struct ieee80211_hw *hw;
6500 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
6501 unsigned long flags;
6502 u16 pci_cmd;
6503
6504 /************************
6505 * 1. Allocating HW data
6506 ************************/
6507
6508 hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
6509 if (!hw) {
6510 err = -ENOMEM;
6511 goto out;
6512 }
6513 il = hw->priv;
6514 il->hw = hw;
6515 SET_IEEE80211_DEV(hw, &pdev->dev);
6516
6517 D_INFO("*** LOAD DRIVER ***\n");
6518 il->cfg = cfg;
6519 il->ops = &il4965_ops;
6520#ifdef CONFIG_IWLEGACY_DEBUGFS
6521 il->debugfs_ops = &il4965_debugfs_ops;
6522#endif
6523 il->pci_dev = pdev;
6524 il->inta_mask = CSR_INI_SET_MASK;
6525
6526 /**************************
6527 * 2. Initializing PCI bus
6528 **************************/
6529 pci_disable_link_state(pdev,
6530 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6531 PCIE_LINK_STATE_CLKPM);
6532
6533 if (pci_enable_device(pdev)) {
6534 err = -ENODEV;
6535 goto out_ieee80211_free_hw;
6536 }
6537
6538 pci_set_master(pdev);
6539
6540 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
6541 if (!err)
6542 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
6543 if (err) {
6544 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6545 if (!err)
6546 err =
6547 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6548 /* both attempts failed: */
6549 if (err) {
6550 IL_WARN("No suitable DMA available.\n");
6551 goto out_pci_disable_device;
6552 }
6553 }
6554
6555 err = pci_request_regions(pdev, DRV_NAME);
6556 if (err)
6557 goto out_pci_disable_device;
6558
6559 pci_set_drvdata(pdev, il);
6560
6561 /***********************
6562 * 3. Read REV register
6563 ***********************/
6564 il->hw_base = pci_ioremap_bar(pdev, 0);
6565 if (!il->hw_base) {
6566 err = -ENODEV;
6567 goto out_pci_release_regions;
6568 }
6569
6570 D_INFO("pci_resource_len = 0x%08llx\n",
6571 (unsigned long long)pci_resource_len(pdev, 0));
6572 D_INFO("pci_resource_base = %p\n", il->hw_base);
6573
6574	 /* these spin locks will be used in apm_ops.init and EEPROM access,
6575	 * so we should init them now
6576 */
6577 spin_lock_init(&il->reg_lock);
6578 spin_lock_init(&il->lock);
6579
6580 /*
6581 * stop and reset the on-board processor just in case it is in a
6582 * strange state ... like being left stranded by a primary kernel
6583 * and this is now the kdump kernel trying to start up
6584 */
6585 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
6586
6587 il4965_hw_detect(il);
6588 IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
6589
6590 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6591 * PCI Tx retries from interfering with C3 CPU state */
6592 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
6593
6594 il4965_prepare_card_hw(il);
6595 if (!il->hw_ready) {
6596 IL_WARN("Failed, HW not ready\n");
6597 err = -EIO;
6598 goto out_iounmap;
6599 }
6600
6601 /*****************
6602 * 4. Read EEPROM
6603 *****************/
6604 /* Read the EEPROM */
6605 err = il_eeprom_init(il);
6606 if (err) {
6607 IL_ERR("Unable to init EEPROM\n");
6608 goto out_iounmap;
6609 }
6610 err = il4965_eeprom_check_version(il);
6611 if (err)
6612 goto out_free_eeprom;
6613
6614 /* extract MAC Address */
6615 il4965_eeprom_get_mac(il, il->addresses[0].addr);
6616 D_INFO("MAC address: %pM\n", il->addresses[0].addr);
6617 il->hw->wiphy->addresses = il->addresses;
6618 il->hw->wiphy->n_addresses = 1;
6619
6620 /************************
6621 * 5. Setup HW constants
6622 ************************/
6623 il4965_set_hw_params(il);
6624
6625 /*******************
6626 * 6. Setup il
6627 *******************/
6628
6629 err = il4965_init_drv(il);
6630 if (err)
6631 goto out_free_eeprom;
6632 /* At this point both hw and il are initialized. */
6633
6634 /********************
6635 * 7. Setup services
6636 ********************/
6637 spin_lock_irqsave(&il->lock, flags);
6638 il_disable_interrupts(il);
6639 spin_unlock_irqrestore(&il->lock, flags);
6640
6641 pci_enable_msi(il->pci_dev);
6642
6643 err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
6644 if (err) {
6645 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
6646 goto out_disable_msi;
6647 }
6648
6649 il4965_setup_deferred_work(il);
6650 il4965_setup_handlers(il);
6651
6652 /*********************************************
6653 * 8. Enable interrupts and read RFKILL state
6654 *********************************************/
6655
6656 /* enable rfkill interrupt: hw bug w/a */
6657 pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
6658 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
6659 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
6660 pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
6661 }
6662
6663 il_enable_rfkill_int(il);
6664
6665 /* If platform's RF_KILL switch is NOT set to KILL */
6666 if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
6667 clear_bit(S_RFKILL, &il->status);
6668 else
6669 set_bit(S_RFKILL, &il->status);
6670
6671 wiphy_rfkill_set_hw_state(il->hw->wiphy,
6672 test_bit(S_RFKILL, &il->status));
6673
6674 il_power_initialize(il);
6675
6676 init_completion(&il->_4965.firmware_loading_complete);
6677
6678 err = il4965_request_firmware(il, true);
6679 if (err)
6680 goto out_destroy_workqueue;
6681
6682 return 0;
6683
6684out_destroy_workqueue:
6685 destroy_workqueue(il->workqueue);
6686 il->workqueue = NULL;
6687 free_irq(il->pci_dev->irq, il);
6688out_disable_msi:
6689 pci_disable_msi(il->pci_dev);
6690 il4965_uninit_drv(il);
6691out_free_eeprom:
6692 il_eeprom_free(il);
6693out_iounmap:
6694 iounmap(il->hw_base);
6695out_pci_release_regions:
6696 pci_release_regions(pdev);
6697out_pci_disable_device:
6698 pci_disable_device(pdev);
6699out_ieee80211_free_hw:
6700 ieee80211_free_hw(il->hw);
6701out:
6702 return err;
6703}
6704
6705static void
6706il4965_pci_remove(struct pci_dev *pdev)
6707{
6708 struct il_priv *il = pci_get_drvdata(pdev);
6709 unsigned long flags;
6710
6711 if (!il)
6712 return;
6713
6714 wait_for_completion(&il->_4965.firmware_loading_complete);
6715
6716 D_INFO("*** UNLOAD DRIVER ***\n");
6717
6718 il_dbgfs_unregister(il);
6719 sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
6720
6721	 /* The ieee80211_unregister_hw call will cause il_mac_stop and thus
6722	 * il4965_down to be called; since we are removing the device
6723	 * we need to set the S_EXIT_PENDING bit.
6724 */
6725 set_bit(S_EXIT_PENDING, &il->status);
6726
6727 il_leds_exit(il);
6728
6729 if (il->mac80211_registered) {
6730 ieee80211_unregister_hw(il->hw);
6731 il->mac80211_registered = 0;
6732 } else {
6733 il4965_down(il);
6734 }
6735
6736 /*
6737 * Make sure device is reset to low power before unloading driver.
6738 * This may be redundant with il4965_down(), but there are paths to
6739 * run il4965_down() without calling apm_ops.stop(), and there are
6740 * paths to avoid running il4965_down() at all before leaving driver.
6741 * This (inexpensive) call *makes sure* device is reset.
6742 */
6743 il_apm_stop(il);
6744
6745 /* make sure we flush any pending irq or
6746 * tasklet for the driver
6747 */
6748 spin_lock_irqsave(&il->lock, flags);
6749 il_disable_interrupts(il);
6750 spin_unlock_irqrestore(&il->lock, flags);
6751
6752 il4965_synchronize_irq(il);
6753
6754 il4965_dealloc_ucode_pci(il);
6755
6756 if (il->rxq.bd)
6757 il4965_rx_queue_free(il, &il->rxq);
6758 il4965_hw_txq_ctx_free(il);
6759
6760 il_eeprom_free(il);
6761
6762 /*netif_stop_queue(dev); */
6763 flush_workqueue(il->workqueue);
6764
6765 /* ieee80211_unregister_hw calls il_mac_stop, which flushes
6766 * il->workqueue... so we can't take down the workqueue
6767 * until now... */
6768 destroy_workqueue(il->workqueue);
6769 il->workqueue = NULL;
6770
6771 free_irq(il->pci_dev->irq, il);
6772 pci_disable_msi(il->pci_dev);
6773 iounmap(il->hw_base);
6774 pci_release_regions(pdev);
6775 pci_disable_device(pdev);
6776
6777 il4965_uninit_drv(il);
6778
6779 dev_kfree_skb(il->beacon_skb);
6780
6781 ieee80211_free_hw(il->hw);
6782}
6783
6784/*
6785 * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask;
6786 * must be called under il->lock and with MAC access held
6787 */
6788void
6789il4965_txq_set_sched(struct il_priv *il, u32 mask)
6790{
6791 il_wr_prph(il, IL49_SCD_TXFACT, mask);
6792}
6793
6794/*****************************************************************************
6795 *
6796 * driver and module entry point
6797 *
6798 *****************************************************************************/
6799
6800/* Hardware specific file defines the PCI IDs table for that hardware module */
6801static const struct pci_device_id il4965_hw_card_ids[] = {
6802 {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
6803 {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
6804 {0}
6805};
6806MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
6807
6808static struct pci_driver il4965_driver = {
6809 .name = DRV_NAME,
6810 .id_table = il4965_hw_card_ids,
6811 .probe = il4965_pci_probe,
6812 .remove = il4965_pci_remove,
6813 .driver.pm = IL_LEGACY_PM_OPS,
6814};
6815
6816static int __init
6817il4965_init(void)
6818{
6819
6820 int ret;
6821 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6822 pr_info(DRV_COPYRIGHT "\n");
6823
6824 ret = il4965_rate_control_register();
6825 if (ret) {
6826 pr_err("Unable to register rate control algorithm: %d\n", ret);
6827 return ret;
6828 }
6829
6830 ret = pci_register_driver(&il4965_driver);
6831 if (ret) {
6832 pr_err("Unable to initialize PCI module\n");
6833 goto error_register;
6834 }
6835
6836 return ret;
6837
6838error_register:
6839 il4965_rate_control_unregister();
6840 return ret;
6841}
6842
6843static void __exit
6844il4965_exit(void)
6845{
6846 pci_unregister_driver(&il4965_driver);
6847 il4965_rate_control_unregister();
6848}
6849
6850module_exit(il4965_exit);
6851module_init(il4965_init);
6852
6853#ifdef CONFIG_IWLEGACY_DEBUG
6854module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
6855MODULE_PARM_DESC(debug, "debug output mask");
6856#endif
6857
6858module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
6859MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
6860module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
6861MODULE_PARM_DESC(queues_num, "number of hw queues.");
6862module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
6863MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
6864module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
6865 S_IRUGO);
6866MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
6867module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
6868MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
new file mode 100644
index 000000000000..bac60b2bc3f0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -0,0 +1,2835 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/skbuff.h>
28#include <linux/slab.h>
29#include <net/mac80211.h>
30
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/delay.h>
34
35#include <linux/workqueue.h>
36
37#include "common.h"
38#include "4965.h"
39
40#define IL4965_RS_NAME "iwl-4965-rs"
41
42#define NUM_TRY_BEFORE_ANT_TOGGLE 1
43#define IL_NUMBER_TRY 1
44#define IL_HT_NUMBER_TRY 3
45
46#define RATE_MAX_WINDOW 62 /* # tx in history win */
47#define RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
48#define RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
49
50/* max allowed rate miss before sync LQ cmd */
51#define IL_MISSED_RATE_MAX 15
52/* max time to accumulate history before flush */
53#define RATE_SCALE_FLUSH_INTVL (3*HZ)
54
55static u8 rs_ht_to_legacy[] = {
56 RATE_6M_IDX, RATE_6M_IDX,
57 RATE_6M_IDX, RATE_6M_IDX,
58 RATE_6M_IDX,
59 RATE_6M_IDX, RATE_9M_IDX,
60 RATE_12M_IDX, RATE_18M_IDX,
61 RATE_24M_IDX, RATE_36M_IDX,
62 RATE_48M_IDX, RATE_54M_IDX
63};
64
65static const u8 ant_toggle_lookup[] = {
66 /*ANT_NONE -> */ ANT_NONE,
67 /*ANT_A -> */ ANT_B,
68 /*ANT_B -> */ ANT_C,
69 /*ANT_AB -> */ ANT_BC,
70 /*ANT_C -> */ ANT_A,
71 /*ANT_AC -> */ ANT_AB,
72 /*ANT_BC -> */ ANT_AC,
73 /*ANT_ABC -> */ ANT_ABC,
74};
75
76#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
77 [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
78 RATE_SISO_##s##M_PLCP, \
79 RATE_MIMO2_##s##M_PLCP,\
80 RATE_##r##M_IEEE, \
81 RATE_##ip##M_IDX, \
82 RATE_##in##M_IDX, \
83 RATE_##rp##M_IDX, \
84 RATE_##rn##M_IDX, \
85 RATE_##pp##M_IDX, \
86 RATE_##np##M_IDX }
87
88/*
89 * Parameter order:
90 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
91 *
92 * If there isn't a valid next or previous rate then INV is used which
93 * maps to RATE_INVALID
94 *
95 */
96const struct il_rate_info il_rates[RATE_COUNT] = {
97 IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
98 IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
99 IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
100 IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
101 IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
102 IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
103 IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
104 IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
105 IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
106 IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
107 IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
108 IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
109 IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
110};
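/*
 * Editorial sketch of how one entry above expands (pure macro expansion
 * of IL_DECLARE_RATE_INFO; no new identifiers are introduced):
 *   IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18)
 * becomes
 *   [RATE_12M_IDX] = { RATE_12M_PLCP, RATE_SISO_12M_PLCP,
 *                      RATE_MIMO2_12M_PLCP, RATE_12M_IEEE,
 *                      RATE_11M_IDX, RATE_18M_IDX,
 *                      RATE_11M_IDX, RATE_18M_IDX,
 *                      RATE_11M_IDX, RATE_18M_IDX };
 */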
111
112static int
113il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
114{
115 int idx = 0;
116
117 /* HT rate format */
118 if (rate_n_flags & RATE_MCS_HT_MSK) {
119 idx = (rate_n_flags & 0xff);
120
121 if (idx >= RATE_MIMO2_6M_PLCP)
122 idx = idx - RATE_MIMO2_6M_PLCP;
123
124 idx += IL_FIRST_OFDM_RATE;
125 /* skip 9M not supported in ht */
126 if (idx >= RATE_9M_IDX)
127 idx += 1;
128 if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
129 return idx;
130
131 /* legacy rate format, search for match in table */
132 } else {
133 for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
134 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
135 return idx;
136 }
137
138 return -1;
139}
140
141static void il4965_rs_rate_scale_perform(struct il_priv *il,
142 struct sk_buff *skb,
143 struct ieee80211_sta *sta,
144 struct il_lq_sta *lq_sta);
145static void il4965_rs_fill_link_cmd(struct il_priv *il,
146 struct il_lq_sta *lq_sta, u32 rate_n_flags);
147static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
148 bool force_search);
149
150#ifdef CONFIG_MAC80211_DEBUGFS
151static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
152 u32 *rate_n_flags, int idx);
153#else
154static void
155il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
156{
157}
158#endif
159
160/**
161 * The following tables contain the expected throughput metrics for all rates
162 *
163 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
164 *
165 * where invalid entries are zeros.
166 *
167 * CCK rates are only valid in legacy table and will only be used in G
168 * (2.4 GHz) band.
169 */
170
171static s32 expected_tpt_legacy[RATE_COUNT] = {
172 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
173};
174
175static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
176 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
177 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
178 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
179 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
180};
181
182static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
183 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
184 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
185 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
186 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
187};
188
189static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
190 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
191 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
192 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
193 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI */
194};
195
196static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
197 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
198 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
199 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
200 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
201};
202
203/* mbps, mcs */
204static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
205 {"1", "BPSK DSSS"},
206 {"2", "QPSK DSSS"},
207 {"5.5", "BPSK CCK"},
208 {"11", "QPSK CCK"},
209 {"6", "BPSK 1/2"},
210 {"9", "BPSK 1/2"},
211 {"12", "QPSK 1/2"},
212 {"18", "QPSK 3/4"},
213 {"24", "16QAM 1/2"},
214 {"36", "16QAM 3/4"},
215 {"48", "64QAM 2/3"},
216 {"54", "64QAM 3/4"},
217 {"60", "64QAM 5/6"},
218};
219
220#define MCS_IDX_PER_STREAM (8)
221
222static inline u8
223il4965_rs_extract_rate(u32 rate_n_flags)
224{
225 return (u8) (rate_n_flags & 0xFF);
226}
227
228static void
229il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
230{
231 win->data = 0;
232 win->success_counter = 0;
233 win->success_ratio = IL_INVALID_VALUE;
234 win->counter = 0;
235 win->average_tpt = IL_INVALID_VALUE;
236 win->stamp = 0;
237}
238
239static inline u8
240il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
241{
242 return (ant_type & valid_antenna) == ant_type;
243}
244
245/*
246 * Removes old data from the stats. All data that is older than
247 * TID_MAX_TIME_DIFF will be deleted.
248 */
249static void
250il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
251{
252 /* The oldest age we want to keep */
253 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
254
255 while (tl->queue_count && tl->time_stamp < oldest_time) {
256 tl->total -= tl->packet_count[tl->head];
257 tl->packet_count[tl->head] = 0;
258 tl->time_stamp += TID_QUEUE_CELL_SPACING;
259 tl->queue_count--;
260 tl->head++;
261 if (tl->head >= TID_QUEUE_MAX_SIZE)
262 tl->head = 0;
263 }
264}
265
266/*
267 * increment traffic load value for tid and also remove
268 * any values that are older than the allowed time period
269 */
270static u8
271il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
272{
273 u32 curr_time = jiffies_to_msecs(jiffies);
274 u32 time_diff;
275 s32 idx;
276 struct il_traffic_load *tl = NULL;
277 u8 tid;
278
279 if (ieee80211_is_data_qos(hdr->frame_control)) {
280 u8 *qc = ieee80211_get_qos_ctl(hdr);
281 tid = qc[0] & 0xf;
282 } else
283 return MAX_TID_COUNT;
284
285 if (unlikely(tid >= TID_MAX_LOAD_COUNT))
286 return MAX_TID_COUNT;
287
288 tl = &lq_data->load[tid];
289
290 curr_time -= curr_time % TID_ROUND_VALUE;
291
292 /* Happens only for the first packet. Initialize the data */
293 if (!(tl->queue_count)) {
294 tl->total = 1;
295 tl->time_stamp = curr_time;
296 tl->queue_count = 1;
297 tl->head = 0;
298 tl->packet_count[0] = 1;
299 return MAX_TID_COUNT;
300 }
301
302 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
303 idx = time_diff / TID_QUEUE_CELL_SPACING;
304
305 /* The history is too long: remove data that is older than */
306 /* TID_MAX_TIME_DIFF */
307 if (idx >= TID_QUEUE_MAX_SIZE)
308 il4965_rs_tl_rm_old_stats(tl, curr_time);
309
310 idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
311 tl->packet_count[idx] = tl->packet_count[idx] + 1;
312 tl->total = tl->total + 1;
313
314 if ((idx + 1) > tl->queue_count)
315 tl->queue_count = idx + 1;
316
317 return tid;
318}
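/*
 * Editorial sketch of the bucketing above: packet counts live in a
 * circular array of TID_QUEUE_MAX_SIZE buckets, each spanning
 * TID_QUEUE_CELL_SPACING ms of traffic history (the constant values
 * are defined elsewhere and not shown in this file). A packet arriving
 * time_diff ms after tl->time_stamp is accounted in
 *     idx = (tl->head + time_diff / TID_QUEUE_CELL_SPACING)
 *            % TID_QUEUE_MAX_SIZE;
 * and history older than TID_MAX_TIME_DIFF is pruned first.
 */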
319
320/*
321 * get the traffic load value for tid
322 */
323static u32
324il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
325{
326 u32 curr_time = jiffies_to_msecs(jiffies);
327 u32 time_diff;
328 s32 idx;
329 struct il_traffic_load *tl = NULL;
330
331 if (tid >= TID_MAX_LOAD_COUNT)
332 return 0;
333
334 tl = &(lq_data->load[tid]);
335
336 curr_time -= curr_time % TID_ROUND_VALUE;
337
338 if (!(tl->queue_count))
339 return 0;
340
341 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
342 idx = time_diff / TID_QUEUE_CELL_SPACING;
343
344 /* The history is too long: remove data that is older than */
345 /* TID_MAX_TIME_DIFF */
346 if (idx >= TID_QUEUE_MAX_SIZE)
347 il4965_rs_tl_rm_old_stats(tl, curr_time);
348
349 return tl->total;
350}
351
352static int
353il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
354 u8 tid, struct ieee80211_sta *sta)
355{
356 int ret = -EAGAIN;
357 u32 load;
358
359 load = il4965_rs_tl_get_load(lq_data, tid);
360
361 if (load > IL_AGG_LOAD_THRESHOLD) {
362 D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
363 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
364 if (ret == -EAGAIN) {
365 /*
366			 * driver and mac80211 are out of sync;
367			 * this might be caused by reloading firmware,
368			 * so stop the Tx BA session here
369 */
370 IL_ERR("Fail start Tx agg on tid: %d\n", tid);
371 ieee80211_stop_tx_ba_session(sta, tid);
372 }
373 } else
374 D_HT("Aggregation not enabled for tid %d because load = %u\n",
375 tid, load);
376
377 return ret;
378}
379
380static void
381il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
382 struct ieee80211_sta *sta)
383{
384 if (tid < TID_MAX_LOAD_COUNT)
385 il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
386 else
387 IL_ERR("tid exceeds max load count: %d/%d\n", tid,
388 TID_MAX_LOAD_COUNT);
389}
390
391static inline int
392il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
393{
394 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
395 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
397}
398
399/*
400 * Static function to get the expected throughput from an il_scale_tbl_info
401 * that wraps a NULL pointer check
402 */
403static s32
404il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
405{
406 if (tbl->expected_tpt)
407 return tbl->expected_tpt[rs_idx];
408 return 0;
409}
410
411/**
412 * il4965_rs_collect_tx_data - Update the success/failure sliding win
413 *
414 * We keep a sliding win of the last 62 packets transmitted
415 * at this rate. win->data contains the bitmask of successful
416 * packets.
417 */
418static int
419il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
420 int attempts, int successes)
421{
422 struct il_rate_scale_data *win = NULL;
423 static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
424 s32 fail_count, tpt;
425
426 if (scale_idx < 0 || scale_idx >= RATE_COUNT)
427 return -EINVAL;
428
429 /* Select win for current tx bit rate */
430 win = &(tbl->win[scale_idx]);
431
432 /* Get expected throughput */
433 tpt = il4965_get_expected_tpt(tbl, scale_idx);
434
435 /*
436 * Keep track of only the latest 62 tx frame attempts in this rate's
437 * history win; anything older isn't really relevant any more.
438 * If we have filled up the sliding win, drop the oldest attempt;
439 * if the oldest attempt (highest bit in bitmap) shows "success",
440 * subtract "1" from the success counter (this is the main reason
441 * we keep these bitmaps!).
442 */
443 while (attempts > 0) {
444 if (win->counter >= RATE_MAX_WINDOW) {
445
446 /* remove earliest */
447 win->counter = RATE_MAX_WINDOW - 1;
448
449 if (win->data & mask) {
450 win->data &= ~mask;
451 win->success_counter--;
452 }
453 }
454
455 /* Increment frames-attempted counter */
456 win->counter++;
457
458 /* Shift bitmap by one frame to throw away oldest history */
459 win->data <<= 1;
460
461 /* Mark the most recent #successes attempts as successful */
462 if (successes > 0) {
463 win->success_counter++;
464 win->data |= 0x1;
465 successes--;
466 }
467
468 attempts--;
469 }
470
471 /* Calculate current success ratio, avoid divide-by-0! */
472 if (win->counter > 0)
473 win->success_ratio =
474 128 * (100 * win->success_counter) / win->counter;
475 else
476 win->success_ratio = IL_INVALID_VALUE;
477
478 fail_count = win->counter - win->success_counter;
479
480 /* Calculate average throughput, if we have enough history. */
481 if (fail_count >= RATE_MIN_FAILURE_TH ||
482 win->success_counter >= RATE_MIN_SUCCESS_TH)
483 win->average_tpt = (win->success_ratio * tpt + 64) / 128;
484 else
485 win->average_tpt = IL_INVALID_VALUE;
486
487 /* Tag this win as having been updated */
488 win->stamp = jiffies;
489
490 return 0;
491}
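/*
 * Editorial worked example of the fixed-point arithmetic above
 * (illustrative numbers only): with counter = 40 attempts and
 * success_counter = 30,
 *     success_ratio = 128 * (100 * 30) / 40 = 9600   (75% scaled by 128)
 * and, taking an expected throughput of 102 for the rate,
 *     average_tpt = (9600 * 102 + 64) / 128 = 7650
 * The factor of 128 keeps everything in integer math inside the kernel.
 */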
492
493/*
494 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
495 */
496static u32
497il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
498 int idx, u8 use_green)
499{
500 u32 rate_n_flags = 0;
501
502 if (is_legacy(tbl->lq_type)) {
503 rate_n_flags = il_rates[idx].plcp;
504 if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
505 rate_n_flags |= RATE_MCS_CCK_MSK;
506
507 } else if (is_Ht(tbl->lq_type)) {
508 if (idx > IL_LAST_OFDM_RATE) {
509 IL_ERR("Invalid HT rate idx %d\n", idx);
510 idx = IL_LAST_OFDM_RATE;
511 }
512 rate_n_flags = RATE_MCS_HT_MSK;
513
514 if (is_siso(tbl->lq_type))
515 rate_n_flags |= il_rates[idx].plcp_siso;
516 else
517 rate_n_flags |= il_rates[idx].plcp_mimo2;
518 } else {
519 IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
520 }
521
522 rate_n_flags |=
523 ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
524
525 if (is_Ht(tbl->lq_type)) {
526 if (tbl->is_ht40) {
527 if (tbl->is_dup)
528 rate_n_flags |= RATE_MCS_DUP_MSK;
529 else
530 rate_n_flags |= RATE_MCS_HT40_MSK;
531 }
532 if (tbl->is_SGI)
533 rate_n_flags |= RATE_MCS_SGI_MSK;
534
535 if (use_green) {
536 rate_n_flags |= RATE_MCS_GF_MSK;
537 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
538 rate_n_flags &= ~RATE_MCS_SGI_MSK;
539 IL_ERR("GF was set with SGI:SISO\n");
540 }
541 }
542 }
543 return rate_n_flags;
544}
545
546/*
547 * Interpret uCode API's rate_n_flags format,
548 * fill "search" or "active" tx mode table.
549 */
550static int
551il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
552 enum ieee80211_band band,
553 struct il_scale_tbl_info *tbl, int *rate_idx)
554{
555 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
556 u8 il4965_num_of_ant =
557 il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
558 u8 mcs;
559
560 memset(tbl, 0, sizeof(struct il_scale_tbl_info));
561 *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
562
563 if (*rate_idx == RATE_INVALID) {
564 *rate_idx = -1;
565 return -EINVAL;
566 }
567 tbl->is_SGI = 0; /* default legacy setup */
568 tbl->is_ht40 = 0;
569 tbl->is_dup = 0;
570 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
571 tbl->lq_type = LQ_NONE;
572 tbl->max_search = IL_MAX_SEARCH;
573
574 /* legacy rate format */
575 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
576 if (il4965_num_of_ant == 1) {
577 if (band == IEEE80211_BAND_5GHZ)
578 tbl->lq_type = LQ_A;
579 else
580 tbl->lq_type = LQ_G;
581 }
582 /* HT rate format */
583 } else {
584 if (rate_n_flags & RATE_MCS_SGI_MSK)
585 tbl->is_SGI = 1;
586
587 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
588 (rate_n_flags & RATE_MCS_DUP_MSK))
589 tbl->is_ht40 = 1;
590
591 if (rate_n_flags & RATE_MCS_DUP_MSK)
592 tbl->is_dup = 1;
593
594 mcs = il4965_rs_extract_rate(rate_n_flags);
595
596 /* SISO */
597 if (mcs <= RATE_SISO_60M_PLCP) {
598 if (il4965_num_of_ant == 1)
599 tbl->lq_type = LQ_SISO; /*else NONE */
600 /* MIMO2 */
601 } else {
602 if (il4965_num_of_ant == 2)
603 tbl->lq_type = LQ_MIMO2;
604 }
605 }
606 return 0;
607}
608
609/* switch to another antenna/antennas and return 1 */
610/* if no other valid antenna found, return 0 */
611static int
612il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
613 struct il_scale_tbl_info *tbl)
614{
615 u8 new_ant_type;
616
617 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
618 return 0;
619
620 if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
621 return 0;
622
623 new_ant_type = ant_toggle_lookup[tbl->ant_type];
624
625 while (new_ant_type != tbl->ant_type &&
626 !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
627 new_ant_type = ant_toggle_lookup[new_ant_type];
628
629 if (new_ant_type == tbl->ant_type)
630 return 0;
631
632 tbl->ant_type = new_ant_type;
633 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
634 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
635 return 1;
636}
637
638/**
639 * Green-field mode is valid if the station supports it and
640 * there are no non-GF stations present in the BSS.
641 */
642static bool
643il4965_rs_use_green(struct il_priv *il, struct ieee80211_sta *sta)
644{
645 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
646 !il->ht.non_gf_sta_present;
647}
648
649/**
650 * il4965_rs_get_supported_rates - get the available rates
651 *
652  * if management frame or broadcast frame, only return
653 * basic available rates.
654 *
655 */
656static u16
657il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
658 struct ieee80211_hdr *hdr,
659 enum il_table_type rate_type)
660{
661 if (is_legacy(rate_type)) {
662 return lq_sta->active_legacy_rate;
663 } else {
664 if (is_siso(rate_type))
665 return lq_sta->active_siso_rate;
666 else
667 return lq_sta->active_mimo2_rate;
668 }
669}
670
671static u16
672il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
673 int rate_type)
674{
675 u8 high = RATE_INVALID;
676 u8 low = RATE_INVALID;
677
678 	/* 802.11a or HT walks to the next literal adjacent rate in
679 * the rate table */
680 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
681 int i;
682 u32 mask;
683
684 /* Find the previous rate that is in the rate mask */
685 i = idx - 1;
686 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
687 if (rate_mask & mask) {
688 low = i;
689 break;
690 }
691 }
692
693 /* Find the next rate that is in the rate mask */
694 i = idx + 1;
695 for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
696 if (rate_mask & mask) {
697 high = i;
698 break;
699 }
700 }
701
702 return (high << 8) | low;
703 }
704
705 low = idx;
706 while (low != RATE_INVALID) {
707 low = il_rates[low].prev_rs;
708 if (low == RATE_INVALID)
709 break;
710 if (rate_mask & (1 << low))
711 break;
712 D_RATE("Skipping masked lower rate: %d\n", low);
713 }
714
715 high = idx;
716 while (high != RATE_INVALID) {
717 high = il_rates[high].next_rs;
718 if (high == RATE_INVALID)
719 break;
720 if (rate_mask & (1 << high))
721 break;
722 D_RATE("Skipping masked higher rate: %d\n", high);
723 }
724
725 return (high << 8) | low;
726}
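
For A-band and HT rate types the helper above walks the rate bitmap directly instead of following the legacy prev_rs/next_rs links. A minimal sketch of that bitmap walk, with 0xff standing in for RATE_INVALID (names here are illustrative, not the driver's):

/* Find the nearest enabled rates below and above idx in rate_mask and
 * return them packed the same way the driver does: high in bits 15..8,
 * low in bits 7..0, 0xff meaning "no such neighbour". */
static unsigned short adjacent_in_mask(unsigned int rate_mask, int idx,
				       int rate_count)
{
	unsigned char low = 0xff, high = 0xff;
	int i;

	for (i = idx - 1; i >= 0; i--)
		if (rate_mask & (1u << i)) {
			low = i;
			break;
		}

	for (i = idx + 1; i < rate_count; i++)
		if (rate_mask & (1u << i)) {
			high = i;
			break;
		}

	return (high << 8) | low;
}

Callers then unpack with low = high_low & 0xff and high = (high_low >> 8) & 0xff, as il4965_rs_get_lower_rate() and il4965_rs_rate_scale_perform() do below.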
727
728static u32
729il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
730 struct il_scale_tbl_info *tbl, u8 scale_idx,
731 u8 ht_possible)
732{
733 s32 low;
734 u16 rate_mask;
735 u16 high_low;
736 u8 switch_to_legacy = 0;
737 u8 is_green = lq_sta->is_green;
738 struct il_priv *il = lq_sta->drv;
739
740 /* check if we need to switch from HT to legacy rates.
741 	 * assumption is that mandatory rates (1 Mbps or 6 Mbps)
742 	 * are always supported (spec requirement) */
743 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
744 switch_to_legacy = 1;
745 scale_idx = rs_ht_to_legacy[scale_idx];
746 if (lq_sta->band == IEEE80211_BAND_5GHZ)
747 tbl->lq_type = LQ_A;
748 else
749 tbl->lq_type = LQ_G;
750
751 if (il4965_num_of_ant(tbl->ant_type) > 1)
752 tbl->ant_type =
753 il4965_first_antenna(il->hw_params.valid_tx_ant);
754
755 tbl->is_ht40 = 0;
756 tbl->is_SGI = 0;
757 tbl->max_search = IL_MAX_SEARCH;
758 }
759
760 rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
761
762 /* Mask with station rate restriction */
763 if (is_legacy(tbl->lq_type)) {
764 /* supp_rates has no CCK bits in A mode */
765 if (lq_sta->band == IEEE80211_BAND_5GHZ)
766 rate_mask =
767 (u16) (rate_mask &
768 (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
769 else
770 rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
771 }
772
773 /* If we switched from HT to legacy, check current rate */
774 if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
775 low = scale_idx;
776 goto out;
777 }
778
779 high_low =
780 il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
781 tbl->lq_type);
782 low = high_low & 0xff;
783
784 if (low == RATE_INVALID)
785 low = scale_idx;
786
787out:
788 return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
789}
790
791/*
792 * Simple function to compare two rate scale table types
793 */
794static bool
795il4965_table_type_matches(struct il_scale_tbl_info *a,
796 struct il_scale_tbl_info *b)
797{
798 return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
799 a->is_SGI == b->is_SGI);
800}
801
802/*
803 * mac80211 sends us Tx status
804 */
805static void
806il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
807 struct ieee80211_sta *sta, void *il_sta,
808 struct sk_buff *skb)
809{
810 int legacy_success;
811 int retries;
812 int rs_idx, mac_idx, i;
813 struct il_lq_sta *lq_sta = il_sta;
814 struct il_link_quality_cmd *table;
815 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
816 struct il_priv *il = (struct il_priv *)il_r;
817 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
818 enum mac80211_rate_control_flags mac_flags;
819 u32 tx_rate;
820 struct il_scale_tbl_info tbl_type;
821 struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
822
823 D_RATE("get frame ack response, update rate scale win\n");
824
825 /* Treat uninitialized rate scaling data same as non-existing. */
826 if (!lq_sta) {
827 D_RATE("Station rate scaling not created yet.\n");
828 return;
829 } else if (!lq_sta->drv) {
830 D_RATE("Rate scaling not initialized yet.\n");
831 return;
832 }
833
834 if (!ieee80211_is_data(hdr->frame_control) ||
835 (info->flags & IEEE80211_TX_CTL_NO_ACK))
836 return;
837
838 /* This packet was aggregated but doesn't carry status info */
839 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
840 !(info->flags & IEEE80211_TX_STAT_AMPDU))
841 return;
842
843 /*
844 * Ignore this Tx frame response if its initial rate doesn't match
845 * that of latest Link Quality command. There may be stragglers
846 * from a previous Link Quality command, but we're no longer interested
847 * in those; they're either from the "active" mode while we're trying
848 * to check "search" mode, or a prior "search" mode after we've moved
849 * to a new "search" mode (which might become the new "active" mode).
850 */
851 table = &lq_sta->lq;
852 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
853 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
854 if (il->band == IEEE80211_BAND_5GHZ)
855 rs_idx -= IL_FIRST_OFDM_RATE;
856 mac_flags = info->status.rates[0].flags;
857 mac_idx = info->status.rates[0].idx;
858 /* For HT packets, map MCS to PLCP */
859 if (mac_flags & IEEE80211_TX_RC_MCS) {
860 mac_idx &= RATE_MCS_CODE_MSK; /* Remove # of streams */
861 if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
862 mac_idx++;
863 /*
864 * mac80211 HT idx is always zero-idxed; we need to move
865 * HT OFDM rates after CCK rates in 2.4 GHz band
866 */
867 if (il->band == IEEE80211_BAND_2GHZ)
868 mac_idx += IL_FIRST_OFDM_RATE;
869 }
870 /* Here we actually compare this rate to the latest LQ command */
871 if (mac_idx < 0 ||
872 tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
873 tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
874 tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
875 tbl_type.ant_type != info->status.antenna ||
876 !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
877 || !!(tx_rate & RATE_MCS_GF_MSK) !=
878 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
879 D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
880 rs_idx, tx_rate);
881 /*
882 * Since rates mis-match, the last LQ command may have failed.
883 * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with
884 * ... driver.
885 */
886 lq_sta->missed_rate_counter++;
887 if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
888 lq_sta->missed_rate_counter = 0;
889 il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
890 }
891 /* Regardless, ignore this status info for outdated rate */
892 return;
893 } else
894 /* Rate did match, so reset the missed_rate_counter */
895 lq_sta->missed_rate_counter = 0;
896
897 /* Figure out if rate scale algorithm is in active or search table */
898 if (il4965_table_type_matches
899 (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
900 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
901 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
902 } else
903 if (il4965_table_type_matches
904 (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
905 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
906 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
907 } else {
908 D_RATE("Neither active nor search matches tx rate\n");
909 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
910 D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
911 tmp_tbl->ant_type, tmp_tbl->is_SGI);
912 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
913 D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
914 tmp_tbl->ant_type, tmp_tbl->is_SGI);
915 D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
916 tbl_type.ant_type, tbl_type.is_SGI);
917 /*
918 		 * no matching table found; bypass the data collection
919 		 * and continue rate scaling to find the right rate table
920 */
921 il4965_rs_stay_in_table(lq_sta, true);
922 goto done;
923 }
924
925 /*
926 * Updating the frame history depends on whether packets were
927 * aggregated.
928 *
929 	 * For aggregation, all packets were transmitted at the same rate: the
930 	 * first entry in the rate scale table.
931 */
932 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
933 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
934 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
935 &rs_idx);
936 il4965_rs_collect_tx_data(curr_tbl, rs_idx,
937 info->status.ampdu_len,
938 info->status.ampdu_ack_len);
939
940 /* Update success/fail counts if not searching for new mode */
941 if (lq_sta->stay_in_tbl) {
942 lq_sta->total_success += info->status.ampdu_ack_len;
943 lq_sta->total_failed +=
944 (info->status.ampdu_len -
945 info->status.ampdu_ack_len);
946 }
947 } else {
948 /*
949 		 * For legacy, update frame history for each Tx retry.
950 */
951 retries = info->status.rates[0].count - 1;
952 /* HW doesn't send more than 15 retries */
953 retries = min(retries, 15);
954
955 /* The last transmission may have been successful */
956 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
957 /* Collect data for each rate used during failed TX attempts */
958 for (i = 0; i <= retries; ++i) {
959 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
960 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
961 &tbl_type, &rs_idx);
962 /*
963 * Only collect stats if retried rate is in the same RS
964 * table as active/search.
965 */
966 if (il4965_table_type_matches(&tbl_type, curr_tbl))
967 tmp_tbl = curr_tbl;
968 else if (il4965_table_type_matches
969 (&tbl_type, other_tbl))
970 tmp_tbl = other_tbl;
971 else
972 continue;
973 il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
974 i <
975 retries ? 0 : legacy_success);
976 }
977
978 /* Update success/fail counts if not searching for new mode */
979 if (lq_sta->stay_in_tbl) {
980 lq_sta->total_success += legacy_success;
981 lq_sta->total_failed += retries + (1 - legacy_success);
982 }
983 }
984 /* The last TX rate is cached in lq_sta; it's set in if/else above */
985 lq_sta->last_rate_n_flags = tx_rate;
986done:
987 /* See if there's a better rate or modulation mode to try. */
988 if (sta->supp_rates[sband->band])
989 il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
990}
991
992/*
993 * Begin a period of staying with a selected modulation mode.
994 * Set "stay_in_tbl" flag to prevent any mode switches.
995 * Set frame tx success limits according to legacy vs. high-throughput,
996 * and reset overall (spanning all rates) tx success history stats.
997 * These control how long we stay using same modulation mode before
998 * searching for a new mode.
999 */
1000static void
1001il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
1002 struct il_lq_sta *lq_sta)
1003{
1004 D_RATE("we are staying in the same table\n");
1005 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1006 if (is_legacy) {
1007 lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
1008 lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
1009 lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
1010 } else {
1011 lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
1012 lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
1013 lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
1014 }
1015 lq_sta->table_count = 0;
1016 lq_sta->total_failed = 0;
1017 lq_sta->total_success = 0;
1018 lq_sta->flush_timer = jiffies;
1019 lq_sta->action_counter = 0;
1020}
1021
1022/*
1023 * Find correct throughput table for given mode of modulation
1024 */
1025static void
1026il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
1027 struct il_scale_tbl_info *tbl)
1028{
1029 /* Used to choose among HT tables */
1030 s32(*ht_tbl_pointer)[RATE_COUNT];
1031
1032 /* Check for invalid LQ type */
1033 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1034 tbl->expected_tpt = expected_tpt_legacy;
1035 return;
1036 }
1037
1038 /* Legacy rates have only one table */
1039 if (is_legacy(tbl->lq_type)) {
1040 tbl->expected_tpt = expected_tpt_legacy;
1041 return;
1042 }
1043
1044 /* Choose among many HT tables depending on number of streams
1045 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1046 * status */
1047 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1048 ht_tbl_pointer = expected_tpt_siso20MHz;
1049 else if (is_siso(tbl->lq_type))
1050 ht_tbl_pointer = expected_tpt_siso40MHz;
1051 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1052 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1053 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1054 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1055
1056 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1057 tbl->expected_tpt = ht_tbl_pointer[0];
1058 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1059 tbl->expected_tpt = ht_tbl_pointer[1];
1060 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1061 tbl->expected_tpt = ht_tbl_pointer[2];
1062 else /* AGG+SGI */
1063 tbl->expected_tpt = ht_tbl_pointer[3];
1064}
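
Each HT throughput table above has four rows selected by the SGI and aggregation flags, so the if/else ladder amounts to a small index computation. Illustrative sketch only:

/* Row layout used by the expected-throughput tables:
 * 0 = normal, 1 = SGI, 2 = aggregation, 3 = aggregation + SGI. */
static inline int tpt_row(int is_sgi, int is_agg)
{
	return (is_agg ? 2 : 0) + (is_sgi ? 1 : 0);
}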
1065
1066/*
1067 * Find starting rate for new "search" high-throughput mode of modulation.
1068 * Goal is to find lowest expected rate (under perfect conditions) that is
1069 * above the current measured throughput of "active" mode, to give new mode
1070 * a fair chance to prove itself without too many challenges.
1071 *
1072 * This gets called when transitioning to more aggressive modulation
1073 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1074 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1075 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1076 * bit rate will typically need to increase, but not if performance was bad.
1077 */
1078static s32
1079il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
1080 struct il_scale_tbl_info *tbl, /* "search" */
1081 u16 rate_mask, s8 idx)
1082{
1083 /* "active" values */
1084 struct il_scale_tbl_info *active_tbl =
1085 &(lq_sta->lq_info[lq_sta->active_tbl]);
1086 s32 active_sr = active_tbl->win[idx].success_ratio;
1087 s32 active_tpt = active_tbl->expected_tpt[idx];
1088
1089 /* expected "search" throughput */
1090 s32 *tpt_tbl = tbl->expected_tpt;
1091
1092 s32 new_rate, high, low, start_hi;
1093 u16 high_low;
1094 s8 rate = idx;
1095
1096 new_rate = high = low = start_hi = RATE_INVALID;
1097
1098 for (;;) {
1099 high_low =
1100 il4965_rs_get_adjacent_rate(il, rate, rate_mask,
1101 tbl->lq_type);
1102
1103 low = high_low & 0xff;
1104 high = (high_low >> 8) & 0xff;
1105
1106 /*
1107 * Lower the "search" bit rate, to give new "search" mode
1108 * approximately the same throughput as "active" if:
1109 *
1110 * 1) "Active" mode has been working modestly well (but not
1111 * great), and expected "search" throughput (under perfect
1112 * conditions) at candidate rate is above the actual
1113 * measured "active" throughput (but less than expected
1114 * "active" throughput under perfect conditions).
1115 * OR
1116 * 2) "Active" mode has been working perfectly or very well
1117 * and expected "search" throughput (under perfect
1118 * conditions) at candidate rate is above expected
1119 * "active" throughput (under perfect conditions).
1120 */
1121 if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
1122 (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
1123 && tpt_tbl[rate] <= active_tpt)) ||
1124 (active_sr >= RATE_SCALE_SWITCH &&
1125 tpt_tbl[rate] > active_tpt)) {
1126
1127 /* (2nd or later pass)
1128 * If we've already tried to raise the rate, and are
1129 * now trying to lower it, use the higher rate. */
1130 if (start_hi != RATE_INVALID) {
1131 new_rate = start_hi;
1132 break;
1133 }
1134
1135 new_rate = rate;
1136
1137 /* Loop again with lower rate */
1138 if (low != RATE_INVALID)
1139 rate = low;
1140
1141 /* Lower rate not available, use the original */
1142 else
1143 break;
1144
1145 /* Else try to raise the "search" rate to match "active" */
1146 } else {
1147 /* (2nd or later pass)
1148 * If we've already tried to lower the rate, and are
1149 * now trying to raise it, use the lower rate. */
1150 if (new_rate != RATE_INVALID)
1151 break;
1152
1153 /* Loop again with higher rate */
1154 else if (high != RATE_INVALID) {
1155 start_hi = high;
1156 rate = high;
1157
1158 /* Higher rate not available, use the original */
1159 } else {
1160 new_rate = rate;
1161 break;
1162 }
1163 }
1164 }
1165
1166 return new_rate;
1167}
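
The two lowering conditions described in the comment inside the loop condense into a single predicate. A sketch under the driver's scaling conventions (cand_tpt and active_tpt are expected values in table units, last_tpt is the measured throughput scaled by 100, sr is the 128x-scaled success ratio); illustrative only, not driver code:

/* Non-zero when the candidate "search" rate should be lowered: either
 * "active" worked modestly well and the candidate already beats the
 * measured throughput without exceeding the expected "active" one, or
 * "active" worked (near) perfectly and the candidate beats even the
 * expected "active" throughput. */
static int should_lower(int sr, int cand_tpt, int active_tpt, int last_tpt,
			int decrease_th, int high_th, int scale_switch)
{
	return (100 * cand_tpt > last_tpt &&
		sr > decrease_th && sr <= high_th &&
		cand_tpt <= active_tpt) ||
	       (sr >= scale_switch && cand_tpt > active_tpt);
}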
1168
1169/*
1170 * Set up search table for MIMO2
1171 */
1172static int
1173il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
1174 struct ieee80211_conf *conf,
1175 struct ieee80211_sta *sta,
1176 struct il_scale_tbl_info *tbl, int idx)
1177{
1178 u16 rate_mask;
1179 s32 rate;
1180 s8 is_green = lq_sta->is_green;
1181
1182 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1183 return -1;
1184
1185 if (sta->smps_mode == IEEE80211_SMPS_STATIC)
1186 return -1;
1187
1188 /* Need both Tx chains/antennas to support MIMO */
1189 if (il->hw_params.tx_chains_num < 2)
1190 return -1;
1191
1192 D_RATE("LQ: try to switch to MIMO2\n");
1193
1194 tbl->lq_type = LQ_MIMO2;
1195 tbl->is_dup = lq_sta->is_dup;
1196 tbl->action = 0;
1197 tbl->max_search = IL_MAX_SEARCH;
1198 rate_mask = lq_sta->active_mimo2_rate;
1199
1200 if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
1201 tbl->is_ht40 = 1;
1202 else
1203 tbl->is_ht40 = 0;
1204
1205 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
1206
1207 rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
1208
1209 D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1210 if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
1211 D_RATE("Can't switch with idx %d rate mask %x\n", rate,
1212 rate_mask);
1213 return -1;
1214 }
1215 tbl->current_rate =
1216 il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
1217
1218 D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
1219 is_green);
1220 return 0;
1221}
1222
1223/*
1224 * Set up search table for SISO
1225 */
1226static int
1227il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
1228 struct ieee80211_conf *conf, struct ieee80211_sta *sta,
1229 struct il_scale_tbl_info *tbl, int idx)
1230{
1231 u16 rate_mask;
1232 u8 is_green = lq_sta->is_green;
1233 s32 rate;
1234
1235 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1236 return -1;
1237
1238 D_RATE("LQ: try to switch to SISO\n");
1239
1240 tbl->is_dup = lq_sta->is_dup;
1241 tbl->lq_type = LQ_SISO;
1242 tbl->action = 0;
1243 tbl->max_search = IL_MAX_SEARCH;
1244 rate_mask = lq_sta->active_siso_rate;
1245
1246 if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
1247 tbl->is_ht40 = 1;
1248 else
1249 tbl->is_ht40 = 0;
1250
1251 if (is_green)
1252 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */
1253
1254 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
1255 rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
1256
1257 D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
1258 if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
1259 D_RATE("can not switch with idx %d rate mask %x\n", rate,
1260 rate_mask);
1261 return -1;
1262 }
1263 tbl->current_rate =
1264 il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
1265 D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
1266 is_green);
1267 return 0;
1268}
1269
1270/*
1271 * Try to switch to new modulation mode from legacy
1272 */
1273static int
1274il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
1275 struct ieee80211_conf *conf,
1276 struct ieee80211_sta *sta, int idx)
1277{
1278 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1279 struct il_scale_tbl_info *search_tbl =
1280 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1281 struct il_rate_scale_data *win = &(tbl->win[idx]);
1282 u32 sz =
1283 (sizeof(struct il_scale_tbl_info) -
1284 (sizeof(struct il_rate_scale_data) * RATE_COUNT));
1285 u8 start_action;
1286 u8 valid_tx_ant = il->hw_params.valid_tx_ant;
1287 u8 tx_chains_num = il->hw_params.tx_chains_num;
1288 int ret = 0;
1289 u8 update_search_tbl_counter = 0;
1290
1291 tbl->action = IL_LEGACY_SWITCH_SISO;
1292
1293 start_action = tbl->action;
1294 for (;;) {
1295 lq_sta->action_counter++;
1296 switch (tbl->action) {
1297 case IL_LEGACY_SWITCH_ANTENNA1:
1298 case IL_LEGACY_SWITCH_ANTENNA2:
1299 D_RATE("LQ: Legacy toggle Antenna\n");
1300
1301 if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
1302 tx_chains_num <= 1) ||
1303 (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
1304 tx_chains_num <= 2))
1305 break;
1306
1307 /* Don't change antenna if success has been great */
1308 if (win->success_ratio >= IL_RS_GOOD_RATIO)
1309 break;
1310
1311 /* Set up search table to try other antenna */
1312 memcpy(search_tbl, tbl, sz);
1313
1314 if (il4965_rs_toggle_antenna
1315 (valid_tx_ant, &search_tbl->current_rate,
1316 search_tbl)) {
1317 update_search_tbl_counter = 1;
1318 il4965_rs_set_expected_tpt_table(lq_sta,
1319 search_tbl);
1320 goto out;
1321 }
1322 break;
1323 case IL_LEGACY_SWITCH_SISO:
1324 D_RATE("LQ: Legacy switch to SISO\n");
1325
1326 /* Set up search table to try SISO */
1327 memcpy(search_tbl, tbl, sz);
1328 search_tbl->is_SGI = 0;
1329 ret =
1330 il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
1331 search_tbl, idx);
1332 if (!ret) {
1333 lq_sta->action_counter = 0;
1334 goto out;
1335 }
1336
1337 break;
1338 case IL_LEGACY_SWITCH_MIMO2_AB:
1339 case IL_LEGACY_SWITCH_MIMO2_AC:
1340 case IL_LEGACY_SWITCH_MIMO2_BC:
1341 D_RATE("LQ: Legacy switch to MIMO2\n");
1342
1343 /* Set up search table to try MIMO */
1344 memcpy(search_tbl, tbl, sz);
1345 search_tbl->is_SGI = 0;
1346
1347 if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
1348 search_tbl->ant_type = ANT_AB;
1349 else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
1350 search_tbl->ant_type = ANT_AC;
1351 else
1352 search_tbl->ant_type = ANT_BC;
1353
1354 if (!il4965_rs_is_valid_ant
1355 (valid_tx_ant, search_tbl->ant_type))
1356 break;
1357
1358 ret =
1359 il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
1360 search_tbl, idx);
1361 if (!ret) {
1362 lq_sta->action_counter = 0;
1363 goto out;
1364 }
1365 break;
1366 }
1367 tbl->action++;
1368 if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
1369 tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
1370
1371 if (tbl->action == start_action)
1372 break;
1373
1374 }
1375 search_tbl->lq_type = LQ_NONE;
1376 return 0;
1377
1378out:
1379 lq_sta->search_better_tbl = 1;
1380 tbl->action++;
1381 if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
1382 tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
1383 if (update_search_tbl_counter)
1384 search_tbl->action = tbl->action;
1385 return 0;
1386
1387}
1388
1389/*
1390 * Try to switch to new modulation mode from SISO
1391 */
1392static int
1393il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
1394 struct ieee80211_conf *conf,
1395 struct ieee80211_sta *sta, int idx)
1396{
1397 u8 is_green = lq_sta->is_green;
1398 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1399 struct il_scale_tbl_info *search_tbl =
1400 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1401 struct il_rate_scale_data *win = &(tbl->win[idx]);
1402 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1403 u32 sz =
1404 (sizeof(struct il_scale_tbl_info) -
1405 (sizeof(struct il_rate_scale_data) * RATE_COUNT));
1406 u8 start_action;
1407 u8 valid_tx_ant = il->hw_params.valid_tx_ant;
1408 u8 tx_chains_num = il->hw_params.tx_chains_num;
1409 u8 update_search_tbl_counter = 0;
1410 int ret;
1411
1412 start_action = tbl->action;
1413
1414 for (;;) {
1415 lq_sta->action_counter++;
1416 switch (tbl->action) {
1417 case IL_SISO_SWITCH_ANTENNA1:
1418 case IL_SISO_SWITCH_ANTENNA2:
1419 D_RATE("LQ: SISO toggle Antenna\n");
1420 if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
1421 tx_chains_num <= 1) ||
1422 (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
1423 tx_chains_num <= 2))
1424 break;
1425
1426 if (win->success_ratio >= IL_RS_GOOD_RATIO)
1427 break;
1428
1429 memcpy(search_tbl, tbl, sz);
1430 if (il4965_rs_toggle_antenna
1431 (valid_tx_ant, &search_tbl->current_rate,
1432 search_tbl)) {
1433 update_search_tbl_counter = 1;
1434 goto out;
1435 }
1436 break;
1437 case IL_SISO_SWITCH_MIMO2_AB:
1438 case IL_SISO_SWITCH_MIMO2_AC:
1439 case IL_SISO_SWITCH_MIMO2_BC:
1440 D_RATE("LQ: SISO switch to MIMO2\n");
1441 memcpy(search_tbl, tbl, sz);
1442 search_tbl->is_SGI = 0;
1443
1444 if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
1445 search_tbl->ant_type = ANT_AB;
1446 else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
1447 search_tbl->ant_type = ANT_AC;
1448 else
1449 search_tbl->ant_type = ANT_BC;
1450
1451 if (!il4965_rs_is_valid_ant
1452 (valid_tx_ant, search_tbl->ant_type))
1453 break;
1454
1455 ret =
1456 il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
1457 search_tbl, idx);
1458 if (!ret)
1459 goto out;
1460 break;
1461 case IL_SISO_SWITCH_GI:
1462 if (!tbl->is_ht40 &&
1463 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
1464 break;
1465 if (tbl->is_ht40 &&
1466 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
1467 break;
1468
1469 D_RATE("LQ: SISO toggle SGI/NGI\n");
1470
1471 memcpy(search_tbl, tbl, sz);
1472 if (is_green) {
1473 if (!tbl->is_SGI)
1474 break;
1475 else
1476 IL_ERR("SGI was set in GF+SISO\n");
1477 }
1478 search_tbl->is_SGI = !tbl->is_SGI;
1479 il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1480 if (tbl->is_SGI) {
1481 s32 tpt = lq_sta->last_tpt / 100;
1482 if (tpt >= search_tbl->expected_tpt[idx])
1483 break;
1484 }
1485 search_tbl->current_rate =
1486 il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
1487 is_green);
1488 update_search_tbl_counter = 1;
1489 goto out;
1490 }
1491 tbl->action++;
1492 if (tbl->action > IL_SISO_SWITCH_GI)
1493 tbl->action = IL_SISO_SWITCH_ANTENNA1;
1494
1495 if (tbl->action == start_action)
1496 break;
1497 }
1498 search_tbl->lq_type = LQ_NONE;
1499 return 0;
1500
1501out:
1502 lq_sta->search_better_tbl = 1;
1503 tbl->action++;
1504 if (tbl->action > IL_SISO_SWITCH_GI)
1505 tbl->action = IL_SISO_SWITCH_ANTENNA1;
1506 if (update_search_tbl_counter)
1507 search_tbl->action = tbl->action;
1508
1509 return 0;
1510}
1511
1512/*
1513 * Try to switch to new modulation mode from MIMO2
1514 */
1515static int
1516il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
1517 struct ieee80211_conf *conf,
1518 struct ieee80211_sta *sta, int idx)
1519{
1520 s8 is_green = lq_sta->is_green;
1521 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1522 struct il_scale_tbl_info *search_tbl =
1523 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1524 struct il_rate_scale_data *win = &(tbl->win[idx]);
1525 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1526 u32 sz =
1527 (sizeof(struct il_scale_tbl_info) -
1528 (sizeof(struct il_rate_scale_data) * RATE_COUNT));
1529 u8 start_action;
1530 u8 valid_tx_ant = il->hw_params.valid_tx_ant;
1531 u8 tx_chains_num = il->hw_params.tx_chains_num;
1532 u8 update_search_tbl_counter = 0;
1533 int ret;
1534
1535 start_action = tbl->action;
1536 for (;;) {
1537 lq_sta->action_counter++;
1538 switch (tbl->action) {
1539 case IL_MIMO2_SWITCH_ANTENNA1:
1540 case IL_MIMO2_SWITCH_ANTENNA2:
1541 D_RATE("LQ: MIMO2 toggle Antennas\n");
1542
1543 if (tx_chains_num <= 2)
1544 break;
1545
1546 if (win->success_ratio >= IL_RS_GOOD_RATIO)
1547 break;
1548
1549 memcpy(search_tbl, tbl, sz);
1550 if (il4965_rs_toggle_antenna
1551 (valid_tx_ant, &search_tbl->current_rate,
1552 search_tbl)) {
1553 update_search_tbl_counter = 1;
1554 goto out;
1555 }
1556 break;
1557 case IL_MIMO2_SWITCH_SISO_A:
1558 case IL_MIMO2_SWITCH_SISO_B:
1559 case IL_MIMO2_SWITCH_SISO_C:
1560 D_RATE("LQ: MIMO2 switch to SISO\n");
1561
1562 /* Set up new search table for SISO */
1563 memcpy(search_tbl, tbl, sz);
1564
1565 if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
1566 search_tbl->ant_type = ANT_A;
1567 else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
1568 search_tbl->ant_type = ANT_B;
1569 else
1570 search_tbl->ant_type = ANT_C;
1571
1572 if (!il4965_rs_is_valid_ant
1573 (valid_tx_ant, search_tbl->ant_type))
1574 break;
1575
1576 ret =
1577 il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
1578 search_tbl, idx);
1579 if (!ret)
1580 goto out;
1581
1582 break;
1583
1584 case IL_MIMO2_SWITCH_GI:
1585 if (!tbl->is_ht40 &&
1586 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
1587 break;
1588 if (tbl->is_ht40 &&
1589 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
1590 break;
1591
1592 D_RATE("LQ: MIMO2 toggle SGI/NGI\n");
1593
1594 /* Set up new search table for MIMO2 */
1595 memcpy(search_tbl, tbl, sz);
1596 search_tbl->is_SGI = !tbl->is_SGI;
1597 il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1598 /*
1599 * If active table already uses the fastest possible
1600 * modulation (dual stream with short guard interval),
1601 * and it's working well, there's no need to look
1602 * for a better type of modulation!
1603 */
1604 if (tbl->is_SGI) {
1605 s32 tpt = lq_sta->last_tpt / 100;
1606 if (tpt >= search_tbl->expected_tpt[idx])
1607 break;
1608 }
1609 search_tbl->current_rate =
1610 il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
1611 is_green);
1612 update_search_tbl_counter = 1;
1613 goto out;
1614
1615 }
1616 tbl->action++;
1617 if (tbl->action > IL_MIMO2_SWITCH_GI)
1618 tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
1619
1620 if (tbl->action == start_action)
1621 break;
1622 }
1623 search_tbl->lq_type = LQ_NONE;
1624 return 0;
1625out:
1626 lq_sta->search_better_tbl = 1;
1627 tbl->action++;
1628 if (tbl->action > IL_MIMO2_SWITCH_GI)
1629 tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
1630 if (update_search_tbl_counter)
1631 search_tbl->action = tbl->action;
1632
1633 return 0;
1634
1635}
1636
1637/*
1638 * Check whether we should continue using same modulation mode, or
1639 * begin search for a new mode, based on:
1640 * 1) # tx successes or failures while using this mode
1641 * 2) # times calling this function
1642 * 3) elapsed time in this mode (not used, for now)
1643 */
1644static void
1645il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
1646{
1647 struct il_scale_tbl_info *tbl;
1648 int i;
1649 int active_tbl;
1650 int flush_interval_passed = 0;
1651 struct il_priv *il;
1652
1653 il = lq_sta->drv;
1654 active_tbl = lq_sta->active_tbl;
1655
1656 tbl = &(lq_sta->lq_info[active_tbl]);
1657
1658 /* If we've been disallowing search, see if we should now allow it */
1659 if (lq_sta->stay_in_tbl) {
1660
1661 /* Elapsed time using current modulation mode */
1662 if (lq_sta->flush_timer)
1663 flush_interval_passed =
1664 time_after(jiffies,
1665 (unsigned long)(lq_sta->flush_timer +
1666 RATE_SCALE_FLUSH_INTVL));
1667
1668 /*
1669 * Check if we should allow search for new modulation mode.
1670 * If many frames have failed or succeeded, or we've used
1671 * this same modulation for a long time, allow search, and
1672 * reset history stats that keep track of whether we should
1673 * allow a new search. Also (below) reset all bitmaps and
1674 * stats in active history.
1675 */
1676 if (force_search ||
1677 lq_sta->total_failed > lq_sta->max_failure_limit ||
1678 lq_sta->total_success > lq_sta->max_success_limit ||
1679 (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
1680 flush_interval_passed)) {
1681 D_RATE("LQ: stay is expired %d %d %d\n",
1682 lq_sta->total_failed, lq_sta->total_success,
1683 flush_interval_passed);
1684
1685 /* Allow search for new mode */
1686 lq_sta->stay_in_tbl = 0; /* only place reset */
1687 lq_sta->total_failed = 0;
1688 lq_sta->total_success = 0;
1689 lq_sta->flush_timer = 0;
1690
1691 /*
1692 * Else if we've used this modulation mode enough repetitions
1693 * (regardless of elapsed time or success/failure), reset
1694 * history bitmaps and rate-specific stats for all rates in
1695 * active table.
1696 */
1697 } else {
1698 lq_sta->table_count++;
1699 if (lq_sta->table_count >= lq_sta->table_count_limit) {
1700 lq_sta->table_count = 0;
1701
1702 D_RATE("LQ: stay in table clear win\n");
1703 for (i = 0; i < RATE_COUNT; i++)
1704 il4965_rs_rate_scale_clear_win(&
1705 (tbl->
1706 win
1707 [i]));
1708 }
1709 }
1710
1711 /* If transitioning to allow "search", reset all history
1712 * bitmaps and stats in active table (this will become the new
1713 * "search" table). */
1714 if (!lq_sta->stay_in_tbl) {
1715 for (i = 0; i < RATE_COUNT; i++)
1716 il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
1717 }
1718 }
1719}
1720
1721/*
1722 * setup rate table in uCode
1723 */
1724static void
1725il4965_rs_update_rate_tbl(struct il_priv *il, struct il_lq_sta *lq_sta,
1726 struct il_scale_tbl_info *tbl, int idx, u8 is_green)
1727{
1728 u32 rate;
1729
1730 /* Update uCode's rate table. */
1731 rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
1732 il4965_rs_fill_link_cmd(il, lq_sta, rate);
1733 il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
1734}
1735
1736/*
1737 * Do rate scaling and search for new modulation mode.
1738 */
1739static void
1740il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
1741 struct ieee80211_sta *sta,
1742 struct il_lq_sta *lq_sta)
1743{
1744 struct ieee80211_hw *hw = il->hw;
1745 struct ieee80211_conf *conf = &hw->conf;
1746 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1747 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1748 int low = RATE_INVALID;
1749 int high = RATE_INVALID;
1750 int idx;
1751 int i;
1752 struct il_rate_scale_data *win = NULL;
1753 int current_tpt = IL_INVALID_VALUE;
1754 int low_tpt = IL_INVALID_VALUE;
1755 int high_tpt = IL_INVALID_VALUE;
1756 u32 fail_count;
1757 s8 scale_action = 0;
1758 u16 rate_mask;
1759 u8 update_lq = 0;
1760 struct il_scale_tbl_info *tbl, *tbl1;
1761 u16 rate_scale_idx_msk = 0;
1762 u8 is_green = 0;
1763 u8 active_tbl = 0;
1764 u8 done_search = 0;
1765 u16 high_low;
1766 s32 sr;
1767 u8 tid = MAX_TID_COUNT;
1768 struct il_tid_data *tid_data;
1769
1770 D_RATE("rate scale calculate new rate for skb\n");
1771
1772 /* Send management frames and NO_ACK data using lowest rate. */
1773 /* TODO: this could probably be improved.. */
1774 if (!ieee80211_is_data(hdr->frame_control) ||
1775 (info->flags & IEEE80211_TX_CTL_NO_ACK))
1776 return;
1777
1778 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1779
1780 tid = il4965_rs_tl_add_packet(lq_sta, hdr);
1781 if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1782 tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
1783 if (tid_data->agg.state == IL_AGG_OFF)
1784 lq_sta->is_agg = 0;
1785 else
1786 lq_sta->is_agg = 1;
1787 } else
1788 lq_sta->is_agg = 0;
1789
1790 /*
1791 * Select rate-scale / modulation-mode table to work with in
1792 * the rest of this function: "search" if searching for better
1793 * modulation mode, or "active" if doing rate scaling within a mode.
1794 */
1795 if (!lq_sta->search_better_tbl)
1796 active_tbl = lq_sta->active_tbl;
1797 else
1798 active_tbl = 1 - lq_sta->active_tbl;
1799
1800 tbl = &(lq_sta->lq_info[active_tbl]);
1801 if (is_legacy(tbl->lq_type))
1802 lq_sta->is_green = 0;
1803 else
1804 lq_sta->is_green = il4965_rs_use_green(il, sta);
1805 is_green = lq_sta->is_green;
1806
1807 /* current tx rate */
1808 idx = lq_sta->last_txrate_idx;
1809
1810 D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
1811
1812 /* rates available for this association, and for modulation mode */
1813 rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1814
1815 D_RATE("mask 0x%04X\n", rate_mask);
1816
1817 /* mask with station rate restriction */
1818 if (is_legacy(tbl->lq_type)) {
1819 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1820 /* supp_rates has no CCK bits in A mode */
1821 rate_scale_idx_msk =
1822 (u16) (rate_mask &
1823 (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
1824 else
1825 rate_scale_idx_msk =
1826 (u16) (rate_mask & lq_sta->supp_rates);
1827
1828 } else
1829 rate_scale_idx_msk = rate_mask;
1830
1831 if (!rate_scale_idx_msk)
1832 rate_scale_idx_msk = rate_mask;
1833
1834 if (!((1 << idx) & rate_scale_idx_msk)) {
1835 IL_ERR("Current Rate is not valid\n");
1836 if (lq_sta->search_better_tbl) {
1837 /* revert to active table if search table is not valid */
1838 tbl->lq_type = LQ_NONE;
1839 lq_sta->search_better_tbl = 0;
1840 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1841 /* get "active" rate info */
1842 idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
1843 il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx,
1844 is_green);
1845 }
1846 return;
1847 }
1848
1849 /* Get expected throughput table and history win for current rate */
1850 if (!tbl->expected_tpt) {
1851 IL_ERR("tbl->expected_tpt is NULL\n");
1852 return;
1853 }
1854
1855 /* force user max rate if set by user */
1856 if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
1857 idx = lq_sta->max_rate_idx;
1858 update_lq = 1;
1859 win = &(tbl->win[idx]);
1860 goto lq_update;
1861 }
1862
1863 win = &(tbl->win[idx]);
1864
1865 /*
1866 * If there is not enough history to calculate actual average
1867 * throughput, keep analyzing results of more tx frames, without
1868 * changing rate or mode (bypass most of the rest of this function).
1869 * Set up new rate table in uCode only if old rate is not supported
1870 * in current association (use new rate found above).
1871 */
1872 fail_count = win->counter - win->success_counter;
1873 if (fail_count < RATE_MIN_FAILURE_TH &&
1874 win->success_counter < RATE_MIN_SUCCESS_TH) {
1875 D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n",
1876 win->success_counter, win->counter, idx);
1877
1878 /* Can't calculate this yet; not enough history */
1879 win->average_tpt = IL_INVALID_VALUE;
1880
1881 /* Should we stay with this modulation mode,
1882 * or search for a new one? */
1883 il4965_rs_stay_in_table(lq_sta, false);
1884
1885 goto out;
1886 }
1887 /* Else we have enough samples; calculate estimate of
1888 * actual average throughput */
1889 if (win->average_tpt !=
1890 ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
1891 IL_ERR("expected_tpt should have been calculated by now\n");
1892 win->average_tpt =
1893 ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
1894 }
1895
1896 /* If we are searching for better modulation mode, check success. */
1897 if (lq_sta->search_better_tbl) {
1898 /* If good success, continue using the "search" mode;
1899 * no need to send new link quality command, since we're
1900 * continuing to use the setup that we've been trying. */
1901 if (win->average_tpt > lq_sta->last_tpt) {
1902
1903 D_RATE("LQ: SWITCHING TO NEW TBL "
1904 "suc=%d cur-tpt=%d old-tpt=%d\n",
1905 win->success_ratio, win->average_tpt,
1906 lq_sta->last_tpt);
1907
1908 if (!is_legacy(tbl->lq_type))
1909 lq_sta->enable_counter = 1;
1910
1911 /* Swap tables; "search" becomes "active" */
1912 lq_sta->active_tbl = active_tbl;
1913 current_tpt = win->average_tpt;
1914
1915 /* Else poor success; go back to mode in "active" table */
1916 } else {
1917
1918 D_RATE("LQ: GOING BACK TO THE OLD TBL "
1919 "suc=%d cur-tpt=%d old-tpt=%d\n",
1920 win->success_ratio, win->average_tpt,
1921 lq_sta->last_tpt);
1922
1923 /* Nullify "search" table */
1924 tbl->lq_type = LQ_NONE;
1925
1926 /* Revert to "active" table */
1927 active_tbl = lq_sta->active_tbl;
1928 tbl = &(lq_sta->lq_info[active_tbl]);
1929
1930 /* Revert to "active" rate and throughput info */
1931 idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
1932 current_tpt = lq_sta->last_tpt;
1933
1934 /* Need to set up a new rate table in uCode */
1935 update_lq = 1;
1936 }
1937
1938 /* Either way, we've made a decision; modulation mode
1939 * search is done, allow rate adjustment next time. */
1940 lq_sta->search_better_tbl = 0;
1941 done_search = 1; /* Don't switch modes below! */
1942 goto lq_update;
1943 }
1944
1945 /* (Else) not in search of better modulation mode, try for better
1946 * starting rate, while staying in this mode. */
1947 high_low =
1948 il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
1949 tbl->lq_type);
1950 low = high_low & 0xff;
1951 high = (high_low >> 8) & 0xff;
1952
1953 	/* If user set max rate, don't allow rates above the user constraint */
1954 if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
1955 high = RATE_INVALID;
1956
1957 sr = win->success_ratio;
1958
1959 /* Collect measured throughputs for current and adjacent rates */
1960 current_tpt = win->average_tpt;
1961 if (low != RATE_INVALID)
1962 low_tpt = tbl->win[low].average_tpt;
1963 if (high != RATE_INVALID)
1964 high_tpt = tbl->win[high].average_tpt;
1965
1966 scale_action = 0;
1967
1968 /* Too many failures, decrease rate */
1969 if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
1970 D_RATE("decrease rate because of low success_ratio\n");
1971 scale_action = -1;
1972
1973 /* No throughput measured yet for adjacent rates; try increase. */
1974 } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
1975
1976 if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
1977 scale_action = 1;
1978 else if (low != RATE_INVALID)
1979 scale_action = 0;
1980 }
1981
1982 /* Both adjacent throughputs are measured, but neither one has better
1983 * throughput; we're using the best rate, don't change it! */
1984 else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
1985 low_tpt < current_tpt && high_tpt < current_tpt)
1986 scale_action = 0;
1987
1988 /* At least one adjacent rate's throughput is measured,
1989 * and may have better performance. */
1990 else {
1991 /* Higher adjacent rate's throughput is measured */
1992 if (high_tpt != IL_INVALID_VALUE) {
1993 /* Higher rate has better throughput */
1994 if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
1995 scale_action = 1;
1996 else
1997 scale_action = 0;
1998
1999 /* Lower adjacent rate's throughput is measured */
2000 } else if (low_tpt != IL_INVALID_VALUE) {
2001 /* Lower rate has better throughput */
2002 if (low_tpt > current_tpt) {
2003 D_RATE("decrease rate because of low tpt\n");
2004 scale_action = -1;
2005 } else if (sr >= RATE_INCREASE_TH) {
2006 scale_action = 1;
2007 }
2008 }
2009 }
2010
2011 /* Sanity check; asked for decrease, but success rate or throughput
2012 * has been good at old rate. Don't change it. */
2013 if (scale_action == -1 && low != RATE_INVALID &&
2014 (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
2015 scale_action = 0;
2016
2017 switch (scale_action) {
2018 case -1:
2019 /* Decrease starting rate, update uCode's rate table */
2020 if (low != RATE_INVALID) {
2021 update_lq = 1;
2022 idx = low;
2023 }
2024
2025 break;
2026 case 1:
2027 /* Increase starting rate, update uCode's rate table */
2028 if (high != RATE_INVALID) {
2029 update_lq = 1;
2030 idx = high;
2031 }
2032
2033 break;
2034 case 0:
2035 /* No change */
2036 default:
2037 break;
2038 }
2039
2040 D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n",
2041 idx, scale_action, low, high, tbl->lq_type);
2042
2043lq_update:
2044 /* Replace uCode's rate table for the destination station. */
2045 if (update_lq)
2046 il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx, is_green);
2047
2048 /* Should we stay with this modulation mode,
2049 * or search for a new one? */
2050 il4965_rs_stay_in_table(lq_sta, false);
2051
2052 /*
2053 * Search for new modulation mode if we're:
2054 * 1) Not changing rates right now
2055 * 2) Not just finishing up a search
2056 * 3) Allowing a new search
2057 */
2058 if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
2059 /* Save current throughput to compare with "search" throughput */
2060 lq_sta->last_tpt = current_tpt;
2061
2062 /* Select a new "search" modulation mode to try.
2063 * If one is found, set up the new "search" table. */
2064 if (is_legacy(tbl->lq_type))
2065 il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
2066 else if (is_siso(tbl->lq_type))
2067 il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
2068 idx);
2069 else /* (is_mimo2(tbl->lq_type)) */
2070 il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
2071 idx);
2072
2073 /* If new "search" mode was selected, set up in uCode table */
2074 if (lq_sta->search_better_tbl) {
2075 /* Access the "search" table, clear its history. */
2076 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2077 for (i = 0; i < RATE_COUNT; i++)
2078 il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
2079
2080 /* Use new "search" start rate */
2081 idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
2082
2083 D_RATE("Switch current mcs: %X idx: %d\n",
2084 tbl->current_rate, idx);
2085 il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
2086 il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
2087 } else
2088 done_search = 1;
2089 }
2090
2091 if (done_search && !lq_sta->stay_in_tbl) {
2092 /* If the "active" (non-search) mode was legacy,
2093 * and we've tried switching antennas,
2094 * but we haven't been able to try HT modes (not available),
2095 * stay with best antenna legacy modulation for a while
2096 * before next round of mode comparisons. */
2097 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2098 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2099 lq_sta->action_counter > tbl1->max_search) {
2100 D_RATE("LQ: STAY in legacy table\n");
2101 il4965_rs_set_stay_in_table(il, 1, lq_sta);
2102 }
2103
2104 /* If we're in an HT mode, and all 3 mode switch actions
2105 * have been tried and compared, stay in this best modulation
2106 * mode for a while before next round of mode comparisons. */
2107 if (lq_sta->enable_counter &&
2108 lq_sta->action_counter >= tbl1->max_search) {
2109 if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
2110 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2111 tid != MAX_TID_COUNT) {
2112 tid_data =
2113 &il->stations[lq_sta->lq.sta_id].tid[tid];
2114 if (tid_data->agg.state == IL_AGG_OFF) {
2115 D_RATE("try to aggregate tid %d\n",
2116 tid);
2117 il4965_rs_tl_turn_on_agg(il, tid,
2118 lq_sta, sta);
2119 }
2120 }
2121 il4965_rs_set_stay_in_table(il, 0, lq_sta);
2122 }
2123 }
2124
2125out:
2126 tbl->current_rate =
2127 il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
2128 i = idx;
2129 lq_sta->last_txrate_idx = i;
2130}
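
The scale_action selection in the middle of the function boils down to a small decision procedure over the measured throughputs of the current and adjacent rates. A condensed sketch (it leaves out the adjacent-rate-availability checks and the final sanity override; names are illustrative):

/* -1 = lower the rate, +1 = raise it, 0 = stay.  sr is the 128x-scaled
 * success ratio; *_tpt are 100x-scaled measured throughputs, with
 * UNMEASURED marking a neighbour we have no history for yet. */
#define UNMEASURED (-1)

static int pick_scale_action(int sr, int cur_tpt, int low_tpt, int high_tpt,
			     int decrease_th, int increase_th)
{
	if (sr <= decrease_th || cur_tpt == 0)
		return -1;                        /* too many failures */

	if (low_tpt == UNMEASURED && high_tpt == UNMEASURED)
		return sr >= increase_th ? 1 : 0; /* probe upward first */

	if (low_tpt != UNMEASURED && high_tpt != UNMEASURED &&
	    low_tpt < cur_tpt && high_tpt < cur_tpt)
		return 0;                         /* already at the best rate */

	if (high_tpt != UNMEASURED)
		return (high_tpt > cur_tpt && sr >= increase_th) ? 1 : 0;

	if (low_tpt > cur_tpt)
		return -1;                        /* lower rate measured better */

	return sr >= increase_th ? 1 : 0;
}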
2131
2132/**
2133 * il4965_rs_initialize_lq - Initialize a station's hardware rate table
2134 *
2135 * The uCode's station table contains a table of fallback rates
2136 * for automatic fallback during transmission.
2137 *
2138 * NOTE: This sets up a default set of values. These will be replaced later
2139 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2140 * rc80211_simple.
2141 *
2142 * NOTE: Run C_ADD_STA command to set up station table entry, before
2143 * calling this function (which runs C_TX_LINK_QUALITY_CMD,
2144 * which requires station table entry to exist).
2145 */
2146static void
2147il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
2148 struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
2149{
2150 struct il_scale_tbl_info *tbl;
2151 int rate_idx;
2152 int i;
2153 u32 rate;
2154 u8 use_green;
2155 u8 active_tbl = 0;
2156 u8 valid_tx_ant;
2157 struct il_station_priv *sta_priv;
2158
2159 if (!sta || !lq_sta)
2160 return;
2161
2162 use_green = il4965_rs_use_green(il, sta);
2163 sta_priv = (void *)sta->drv_priv;
2164
2165 i = lq_sta->last_txrate_idx;
2166
2167 valid_tx_ant = il->hw_params.valid_tx_ant;
2168
2169 if (!lq_sta->search_better_tbl)
2170 active_tbl = lq_sta->active_tbl;
2171 else
2172 active_tbl = 1 - lq_sta->active_tbl;
2173
2174 tbl = &(lq_sta->lq_info[active_tbl]);
2175
2176 if (i < 0 || i >= RATE_COUNT)
2177 i = 0;
2178
2179 rate = il_rates[i].plcp;
2180 tbl->ant_type = il4965_first_antenna(valid_tx_ant);
2181 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2182
2183 if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
2184 rate |= RATE_MCS_CCK_MSK;
2185
2186 il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
2187 if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2188 il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2189
2190 rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
2191 tbl->current_rate = rate;
2192 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
2193 il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
2194 il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2195 il_send_lq_cmd(il, &lq_sta->lq, CMD_SYNC, true);
2196}
2197
2198static void
2199il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
2200 struct ieee80211_tx_rate_control *txrc)
2201{
2202
2203 struct sk_buff *skb = txrc->skb;
2204 struct ieee80211_supported_band *sband = txrc->sband;
2205 struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
2206 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2207 struct il_lq_sta *lq_sta = il_sta;
2208 int rate_idx;
2209
2210 D_RATE("rate scale calculate new rate for skb\n");
2211
2212 /* Get max rate if user set max rate */
2213 if (lq_sta) {
2214 lq_sta->max_rate_idx = txrc->max_rate_idx;
2215 if (sband->band == IEEE80211_BAND_5GHZ &&
2216 lq_sta->max_rate_idx != -1)
2217 lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
2218 if (lq_sta->max_rate_idx < 0 ||
2219 lq_sta->max_rate_idx >= RATE_COUNT)
2220 lq_sta->max_rate_idx = -1;
2221 }
2222
2223 /* Treat uninitialized rate scaling data same as non-existing. */
2224 if (lq_sta && !lq_sta->drv) {
2225 D_RATE("Rate scaling not initialized yet.\n");
2226 il_sta = NULL;
2227 }
2228
2229 /* Send management frames and NO_ACK data using lowest rate. */
2230 if (rate_control_send_low(sta, il_sta, txrc))
2231 return;
2232
2233 if (!lq_sta)
2234 return;
2235
2236 rate_idx = lq_sta->last_txrate_idx;
2237
2238 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2239 rate_idx -= IL_FIRST_OFDM_RATE;
2240 /* 6M and 9M shared same MCS idx */
2241 rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
2242 if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
2243 RATE_MIMO2_6M_PLCP)
2244 rate_idx = rate_idx + MCS_IDX_PER_STREAM;
2245 info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
2246 if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
2247 info->control.rates[0].flags |=
2248 IEEE80211_TX_RC_SHORT_GI;
2249 if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
2250 info->control.rates[0].flags |=
2251 IEEE80211_TX_RC_DUP_DATA;
2252 if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
2253 info->control.rates[0].flags |=
2254 IEEE80211_TX_RC_40_MHZ_WIDTH;
2255 if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
2256 info->control.rates[0].flags |=
2257 IEEE80211_TX_RC_GREEN_FIELD;
2258 } else {
2259 /* Check for invalid rates */
2260 if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
2261 (sband->band == IEEE80211_BAND_5GHZ &&
2262 rate_idx < IL_FIRST_OFDM_RATE))
2263 rate_idx = rate_lowest_index(sband, sta);
2264 /* On valid 5 GHz rate, adjust idx */
2265 else if (sband->band == IEEE80211_BAND_5GHZ)
2266 rate_idx -= IL_FIRST_OFDM_RATE;
2267 info->control.rates[0].flags = 0;
2268 }
2269 info->control.rates[0].idx = rate_idx;
2270 info->control.rates[0].count = 1;
2271}
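
The HT branch above converts a driver rate-table index into a mac80211 MCS index: drop the CCK offset, collapse the shared 6M/9M slot, and jump a stream's worth of MCS indexes for MIMO2. A worked example, assuming the usual legacy layout (CCK at indexes 0-3, 6 Mbps at 4) and 8 MCS indexes per stream; the real values come from the driver's rate table, so treat these constants as illustrative:

#include <stdio.h>

int main(void)
{
	int first_ofdm = 4;       /* assumed index of the 6 Mbps entry */
	int mcs_per_stream = 8;   /* assumed MCS indexes per spatial stream */
	int driver_idx = 8;       /* 24 Mbps in the assumed legacy layout */
	int mimo2 = 0;            /* 1 for a two-stream rate */
	int mcs;

	mcs = driver_idx - first_ofdm;   /* offset into the OFDM rates: 4 */
	mcs = mcs > 0 ? mcs - 1 : 0;     /* 6M and 9M share MCS 0: 3 */
	if (mimo2)
		mcs += mcs_per_stream;   /* move into the MIMO2 MCS block */

	printf("MCS %d\n", mcs);         /* MCS 3 for SISO 24 Mbps */
	return 0;
}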
2272
2273static void *
2274il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
2275{
2276 struct il_station_priv *sta_priv =
2277 (struct il_station_priv *)sta->drv_priv;
2278 struct il_priv *il;
2279
2280 il = (struct il_priv *)il_rate;
2281 D_RATE("create station rate scale win\n");
2282
2283 return &sta_priv->lq_sta;
2284}
2285
2286/*
2287 * Called after adding a new station to initialize rate scaling
2288 */
2289void
2290il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
2291{
2292 int i, j;
2293 struct ieee80211_hw *hw = il->hw;
2294 struct ieee80211_conf *conf = &il->hw->conf;
2295 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2296 struct il_station_priv *sta_priv;
2297 struct il_lq_sta *lq_sta;
2298 struct ieee80211_supported_band *sband;
2299
2300 sta_priv = (struct il_station_priv *)sta->drv_priv;
2301 lq_sta = &sta_priv->lq_sta;
2302 sband = hw->wiphy->bands[conf->chandef.chan->band];
2303
2304 lq_sta->lq.sta_id = sta_id;
2305
2306 for (j = 0; j < LQ_SIZE; j++)
2307 for (i = 0; i < RATE_COUNT; i++)
2308 il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
2309 win[i]);
2310
2311 lq_sta->flush_timer = 0;
2312 lq_sta->supp_rates = sta->supp_rates[sband->band];
2313 for (j = 0; j < LQ_SIZE; j++)
2314 for (i = 0; i < RATE_COUNT; i++)
2315 il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
2316 win[i]);
2317
2318 D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
2319 sta_id);
2320 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2321 	 * the lowest or the highest rate. Could consider using RSSI from
2322 	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2323 	 * after assoc. */
2324
2325 lq_sta->is_dup = 0;
2326 lq_sta->max_rate_idx = -1;
2327 lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
2328 lq_sta->is_green = il4965_rs_use_green(il, sta);
2329 lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
2330 lq_sta->band = il->band;
2331 /*
2332 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2333 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2334 */
2335 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2336 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2337 lq_sta->active_siso_rate &= ~((u16) 0x2);
2338 lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
2339
2340 /* Same here */
2341 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2342 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2343 lq_sta->active_mimo2_rate &= ~((u16) 0x2);
2344 lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
2345
2346 /* These values will be overridden later */
2347 lq_sta->lq.general_params.single_stream_ant_msk =
2348 il4965_first_antenna(il->hw_params.valid_tx_ant);
2349 lq_sta->lq.general_params.dual_stream_ant_msk =
2350 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
2351 valid_tx_ant);
2352 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2353 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2354 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
2355 lq_sta->lq.general_params.dual_stream_ant_msk =
2356 il->hw_params.valid_tx_ant;
2357 }
2358
2359 /* as default allow aggregation for all tids */
2360 lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
2361 lq_sta->drv = il;
2362
2363 /* Set last_txrate_idx to lowest rate */
2364 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2365 if (sband->band == IEEE80211_BAND_5GHZ)
2366 lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
2367 lq_sta->is_agg = 0;
2368
2369#ifdef CONFIG_MAC80211_DEBUGFS
2370 lq_sta->dbg_fixed_rate = 0;
2371#endif
2372
2373 il4965_rs_initialize_lq(il, conf, sta, lq_sta);
2374}
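
The active_siso_rate / active_mimo2_rate construction above turns an HT MCS rx_mask byte into the driver's rate-index bitmap: every MCS bit moves up one slot, MCS 0 is copied back to the 6 Mbps position, the unused 9 Mbps slot is cleared, and the whole mask is shifted past the CCK rates. A standalone worked example, assuming IL_FIRST_OFDM_RATE is 4 (CCK occupying indexes 0-3); treat that constant as illustrative:

#include <stdio.h>

int main(void)
{
	unsigned short mcs_rx_mask = 0xff;  /* station supports MCS 0..7 */
	unsigned short first_ofdm = 4;      /* assumed 6 Mbps index */
	unsigned short rate;

	rate = mcs_rx_mask << 1;            /* shift the MCS bits up one slot */
	rate |= mcs_rx_mask & 0x1;          /* keep MCS 0 in the 6 Mbps slot */
	rate &= ~(unsigned short)0x2;       /* clear the unused 9 Mbps slot */
	rate <<= first_ofdm;                /* align past the CCK rates */

	printf("0x%04x\n", rate);           /* 0x1fd0 for this example */
	return 0;
}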
2375
2376static void
2377il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
2378 u32 new_rate)
2379{
2380 struct il_scale_tbl_info tbl_type;
2381 int idx = 0;
2382 int rate_idx;
2383 int repeat_rate = 0;
2384 u8 ant_toggle_cnt = 0;
2385 u8 use_ht_possible = 1;
2386 u8 valid_tx_ant = 0;
2387 struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;
2388
2389 /* Override starting rate (idx 0) if needed for debug purposes */
2390 il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
2391
2392 /* Interpret new_rate (rate_n_flags) */
2393 il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
2394 &rate_idx);
2395
2396 /* How many times should we repeat the initial rate? */
2397 if (is_legacy(tbl_type.lq_type)) {
2398 ant_toggle_cnt = 1;
2399 repeat_rate = IL_NUMBER_TRY;
2400 } else {
2401 repeat_rate = IL_HT_NUMBER_TRY;
2402 }
2403
2404 lq_cmd->general_params.mimo_delimiter =
2405 is_mimo(tbl_type.lq_type) ? 1 : 0;
2406
2407 /* Fill 1st table entry (idx 0) */
2408 lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
2409
2410 if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
2411 lq_cmd->general_params.single_stream_ant_msk =
2412 tbl_type.ant_type;
2413 } else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
2414 lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
2415 }
2416 /* otherwise we don't modify the existing value */
2417 idx++;
2418 repeat_rate--;
2419 if (il)
2420 valid_tx_ant = il->hw_params.valid_tx_ant;
2421
2422 /* Fill rest of rate table */
2423 while (idx < LINK_QUAL_MAX_RETRY_NUM) {
2424 /* Repeat initial/next rate.
2425 * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
2426 * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
2427 while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
2428 if (is_legacy(tbl_type.lq_type)) {
2429 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2430 ant_toggle_cnt++;
2431 else if (il &&
2432 il4965_rs_toggle_antenna(valid_tx_ant,
2433 &new_rate,
2434 &tbl_type))
2435 ant_toggle_cnt = 1;
2436 }
2437
2438 /* Override next rate if needed for debug purposes */
2439 il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
2440
2441 /* Fill next table entry */
2442 lq_cmd->rs_table[idx].rate_n_flags =
2443 cpu_to_le32(new_rate);
2444 repeat_rate--;
2445 idx++;
2446 }
2447
2448 il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2449 &tbl_type, &rate_idx);
2450
2451 /* Indicate to uCode which entries might be MIMO.
2452 * If initial rate was MIMO, this will finally end up
2453 * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
2454 if (is_mimo(tbl_type.lq_type))
2455 lq_cmd->general_params.mimo_delimiter = idx;
2456
2457 /* Get next rate */
2458 new_rate =
2459 il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
2460 use_ht_possible);
2461
2462 /* How many times should we repeat the next rate? */
2463 if (is_legacy(tbl_type.lq_type)) {
2464 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2465 ant_toggle_cnt++;
2466 else if (il &&
2467 il4965_rs_toggle_antenna(valid_tx_ant,
2468 &new_rate, &tbl_type))
2469 ant_toggle_cnt = 1;
2470
2471 repeat_rate = IL_NUMBER_TRY;
2472 } else {
2473 repeat_rate = IL_HT_NUMBER_TRY;
2474 }
2475
2476 /* Don't allow HT rates after next pass.
2477 * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
2478 use_ht_possible = 0;
2479
2480 /* Override next rate if needed for debug purposes */
2481 il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
2482
2483 /* Fill next table entry */
2484 lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
2485
2486 idx++;
2487 repeat_rate--;
2488 }
2489
2490 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2491 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2492
2493 lq_cmd->agg_params.agg_time_limit =
2494 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2495}
2496
2497static void *
2498il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2499{
2500 return hw->priv;
2501}
2502
2503/* rate scale requires free function to be implemented */
2504static void
2505il4965_rs_free(void *il_rate)
2506{
2507 return;
2508}
2509
2510static void
2511il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
2512{
2513 struct il_priv *il __maybe_unused = il_r;
2514
2515 D_RATE("enter\n");
2516 D_RATE("leave\n");
2517}
2518
2519#ifdef CONFIG_MAC80211_DEBUGFS
2520
2521static void
2522il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
2523{
2524 struct il_priv *il;
2525 u8 valid_tx_ant;
2526 u8 ant_sel_tx;
2527
2528 il = lq_sta->drv;
2529 valid_tx_ant = il->hw_params.valid_tx_ant;
2530 if (lq_sta->dbg_fixed_rate) {
2531 ant_sel_tx =
2532 ((lq_sta->
2533 dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
2534 RATE_MCS_ANT_POS);
2535 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2536 *rate_n_flags = lq_sta->dbg_fixed_rate;
2537 D_RATE("Fixed rate ON\n");
2538 } else {
2539 lq_sta->dbg_fixed_rate = 0;
2540 IL_ERR
2541 ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
2542 ant_sel_tx, valid_tx_ant);
2543 D_RATE("Fixed rate OFF\n");
2544 }
2545 } else {
2546 D_RATE("Fixed rate OFF\n");
2547 }
2548}
2549
2550static ssize_t
2551il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
2552 const char __user *user_buf,
2553 size_t count, loff_t *ppos)
2554{
2555 struct il_lq_sta *lq_sta = file->private_data;
2556 struct il_priv *il;
2557 char buf[64];
2558 size_t buf_size;
2559 u32 parsed_rate;
2560
2561 il = lq_sta->drv;
2562 memset(buf, 0, sizeof(buf));
2563 buf_size = min(count, sizeof(buf) - 1);
2564 if (copy_from_user(buf, user_buf, buf_size))
2565 return -EFAULT;
2566
2567 if (sscanf(buf, "%x", &parsed_rate) == 1)
2568 lq_sta->dbg_fixed_rate = parsed_rate;
2569 else
2570 lq_sta->dbg_fixed_rate = 0;
2571
2572 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2573 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2574 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2575
2576 D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
2577 lq_sta->dbg_fixed_rate);
2578
2579 if (lq_sta->dbg_fixed_rate) {
2580 il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2581 il_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
2582 }
2583
2584 return count;
2585}
2586
2587static ssize_t
2588il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
2589 size_t count, loff_t *ppos)
2590{
2591 char *buff;
2592 int desc = 0;
2593 int i = 0;
2594 int idx = 0;
2595 ssize_t ret;
2596
2597 struct il_lq_sta *lq_sta = file->private_data;
2598 struct il_priv *il;
2599 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2600
2601 il = lq_sta->drv;
2602 buff = kmalloc(1024, GFP_KERNEL);
2603 if (!buff)
2604 return -ENOMEM;
2605
2606 desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
2607 desc +=
 2608	    sprintf(buff + desc, "failed=%d success=%d rate=0x%X\n",
2609 lq_sta->total_failed, lq_sta->total_success,
2610 lq_sta->active_legacy_rate);
2611 desc +=
2612 sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
2613 desc +=
2614 sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
2615 (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
2616 (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
2617 (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
2618 desc +=
2619 sprintf(buff + desc, "lq type %s\n",
2620 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
2621 if (is_Ht(tbl->lq_type)) {
2622 desc +=
2623 sprintf(buff + desc, " %s",
2624 (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
2625 desc +=
2626 sprintf(buff + desc, " %s",
2627 (tbl->is_ht40) ? "40MHz" : "20MHz");
2628 desc +=
2629 sprintf(buff + desc, " %s %s %s\n",
2630 (tbl->is_SGI) ? "SGI" : "",
2631 (lq_sta->is_green) ? "GF enabled" : "",
2632 (lq_sta->is_agg) ? "AGG on" : "");
2633 }
2634 desc +=
2635 sprintf(buff + desc, "last tx rate=0x%X\n",
2636 lq_sta->last_rate_n_flags);
2637 desc +=
2638 sprintf(buff + desc,
 2639		    "general:" "flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
2640 lq_sta->lq.general_params.flags,
2641 lq_sta->lq.general_params.mimo_delimiter,
2642 lq_sta->lq.general_params.single_stream_ant_msk,
2643 lq_sta->lq.general_params.dual_stream_ant_msk);
2644
2645 desc +=
2646 sprintf(buff + desc,
2647 "agg:"
2648 "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
2649 le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
2650 lq_sta->lq.agg_params.agg_dis_start_th,
2651 lq_sta->lq.agg_params.agg_frame_cnt_limit);
2652
2653 desc +=
2654 sprintf(buff + desc,
2655 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
2656 lq_sta->lq.general_params.start_rate_idx[0],
2657 lq_sta->lq.general_params.start_rate_idx[1],
2658 lq_sta->lq.general_params.start_rate_idx[2],
2659 lq_sta->lq.general_params.start_rate_idx[3]);
2660
2661 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2662 idx =
2663 il4965_hwrate_to_plcp_idx(le32_to_cpu
2664 (lq_sta->lq.rs_table[i].
2665 rate_n_flags));
2666 if (is_legacy(tbl->lq_type)) {
2667 desc +=
2668 sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
2669 le32_to_cpu(lq_sta->lq.rs_table[i].
2670 rate_n_flags),
2671 il_rate_mcs[idx].mbps);
2672 } else {
2673 desc +=
2674 sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
2675 i,
2676 le32_to_cpu(lq_sta->lq.rs_table[i].
2677 rate_n_flags),
2678 il_rate_mcs[idx].mbps,
2679 il_rate_mcs[idx].mcs);
2680 }
2681 }
2682
2683 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2684 kfree(buff);
2685 return ret;
2686}
2687
2688static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
2689 .write = il4965_rs_sta_dbgfs_scale_table_write,
2690 .read = il4965_rs_sta_dbgfs_scale_table_read,
2691 .open = simple_open,
2692 .llseek = default_llseek,
2693};
2694
2695static ssize_t
2696il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
2697 size_t count, loff_t *ppos)
2698{
2699 char *buff;
2700 int desc = 0;
2701 int i, j;
2702 ssize_t ret;
2703
2704 struct il_lq_sta *lq_sta = file->private_data;
2705
2706 buff = kmalloc(1024, GFP_KERNEL);
2707 if (!buff)
2708 return -ENOMEM;
2709
2710 for (i = 0; i < LQ_SIZE; i++) {
2711 desc +=
2712 sprintf(buff + desc,
2713 "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
2714 "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
2715 lq_sta->lq_info[i].lq_type,
2716 lq_sta->lq_info[i].is_SGI,
2717 lq_sta->lq_info[i].is_ht40,
2718 lq_sta->lq_info[i].is_dup, lq_sta->is_green,
2719 lq_sta->lq_info[i].current_rate);
2720 for (j = 0; j < RATE_COUNT; j++) {
2721 desc +=
2722 sprintf(buff + desc,
2723 "counter=%d success=%d %%=%d\n",
2724 lq_sta->lq_info[i].win[j].counter,
2725 lq_sta->lq_info[i].win[j].success_counter,
2726 lq_sta->lq_info[i].win[j].success_ratio);
2727 }
2728 }
2729 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2730 kfree(buff);
2731 return ret;
2732}
2733
2734static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2735 .read = il4965_rs_sta_dbgfs_stats_table_read,
2736 .open = simple_open,
2737 .llseek = default_llseek,
2738};
2739
2740static ssize_t
2741il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2742 char __user *user_buf, size_t count,
2743 loff_t *ppos)
2744{
2745 char buff[120];
2746 int desc = 0;
2747 struct il_lq_sta *lq_sta = file->private_data;
2748 struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2749
2750 if (is_Ht(tbl->lq_type))
2751 desc +=
2752 sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
2753 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2754 else
2755 desc +=
2756 sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
2757 il_rates[lq_sta->last_txrate_idx].ieee >> 1);
2758
2759 return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2760}
2761
2762static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
2763 .read = il4965_rs_sta_dbgfs_rate_scale_data_read,
2764 .open = simple_open,
2765 .llseek = default_llseek,
2766};
2767
2768static void
2769il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
2770{
2771 struct il_lq_sta *lq_sta = il_sta;
2772 lq_sta->rs_sta_dbgfs_scale_table_file =
2773 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
2774 lq_sta, &rs_sta_dbgfs_scale_table_ops);
2775 lq_sta->rs_sta_dbgfs_stats_table_file =
2776 debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
2777 &rs_sta_dbgfs_stats_table_ops);
2778 lq_sta->rs_sta_dbgfs_rate_scale_data_file =
2779 debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
2780 &rs_sta_dbgfs_rate_scale_data_ops);
2781 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
2782 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
2783 &lq_sta->tx_agg_tid_en);
2784
2785}
2786
2787static void
2788il4965_rs_remove_debugfs(void *il, void *il_sta)
2789{
2790 struct il_lq_sta *lq_sta = il_sta;
2791 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2792 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2793 debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
2794 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
2795}
2796#endif
2797
2798/*
2799 * Initialization of rate scaling information is done by driver after
2800 * the station is added. Since mac80211 calls this function before a
2801 * station is added we ignore it.
2802 */
2803static void
2804il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
2805 struct cfg80211_chan_def *chandef,
2806 struct ieee80211_sta *sta, void *il_sta)
2807{
2808}
2809
2810static const struct rate_control_ops rs_4965_ops = {
2811 .name = IL4965_RS_NAME,
2812 .tx_status = il4965_rs_tx_status,
2813 .get_rate = il4965_rs_get_rate,
2814 .rate_init = il4965_rs_rate_init_stub,
2815 .alloc = il4965_rs_alloc,
2816 .free = il4965_rs_free,
2817 .alloc_sta = il4965_rs_alloc_sta,
2818 .free_sta = il4965_rs_free_sta,
2819#ifdef CONFIG_MAC80211_DEBUGFS
2820 .add_sta_debugfs = il4965_rs_add_debugfs,
2821 .remove_sta_debugfs = il4965_rs_remove_debugfs,
2822#endif
2823};
2824
2825int
2826il4965_rate_control_register(void)
2827{
2828 return ieee80211_rate_control_register(&rs_4965_ops);
2829}
2830
2831void
2832il4965_rate_control_unregister(void)
2833{
2834 ieee80211_rate_control_unregister(&rs_4965_ops);
2835}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
new file mode 100644
index 000000000000..fe47db9c20cd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -0,0 +1,1950 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/dma-mapping.h>
31#include <linux/delay.h>
32#include <linux/sched.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <net/mac80211.h>
36#include <linux/etherdevice.h>
37#include <asm/unaligned.h>
38
39#include "common.h"
40#include "4965.h"
41
42/**
 43 * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
44 * using sample data 100 bytes apart. If these sample points are good,
45 * it's a pretty good bet that everything between them is good, too.
46 */
47static int
48il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
49{
50 u32 val;
51 int ret = 0;
52 u32 errcnt = 0;
53 u32 i;
54
55 D_INFO("ucode inst image size is %u\n", len);
56
57 for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
58 /* read data comes through single port, auto-incr addr */
59 /* NOTE: Use the debugless read so we don't flood kernel log
60 * if IL_DL_IO is set */
61 il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
62 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
63 if (val != le32_to_cpu(*image)) {
64 ret = -EIO;
65 errcnt++;
66 if (errcnt >= 3)
67 break;
68 }
69 }
70
71 return ret;
72}
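
Editor's aside: as a rough illustration of the sparse check above, the fragment below compares a host image against a readback copy at 100-byte strides only, with a plain memory buffer standing in for the auto-incrementing HBUS read port. The names and values are assumptions for the sketch; no device registers are involved.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Compare two copies of a uCode image every 100 bytes, mirroring the
 * sparse verification strategy: if the sampled words all match, the
 * image in between is very likely intact too. Returns 0 on success,
 * -1 on any mismatch, giving up after three bad samples as above.
 */
static int verify_sparse(const uint32_t *host, const uint32_t *readback,
			 size_t len_bytes)
{
	size_t i, errcnt = 0;

	for (i = 0; i < len_bytes; i += 100) {
		size_t w = i / sizeof(uint32_t);

		if (host[w] != readback[w] && ++errcnt >= 3)
			return -1;
	}
	return errcnt ? -1 : 0;
}

int main(void)
{
	uint32_t img[64] = { 0 }, rb[64] = { 0 };

	rb[50] ^= 0xdeadbeef;	/* corrupt one of the sampled words */
	printf("clean: %d corrupt: %d\n",
	       verify_sparse(img, img, sizeof(img)),
	       verify_sparse(img, rb, sizeof(img)));
	return 0;
}
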
73
74/**
75 * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
76 * looking at all data.
77 */
78static int
79il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
80{
81 u32 val;
82 u32 save_len = len;
83 int ret = 0;
84 u32 errcnt;
85
86 D_INFO("ucode inst image size is %u\n", len);
87
88 il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);
89
90 errcnt = 0;
91 for (; len > 0; len -= sizeof(u32), image++) {
92 /* read data comes through single port, auto-incr addr */
93 /* NOTE: Use the debugless read so we don't flood kernel log
94 * if IL_DL_IO is set */
95 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
96 if (val != le32_to_cpu(*image)) {
97 IL_ERR("uCode INST section is invalid at "
98 "offset 0x%x, is 0x%x, s/b 0x%x\n",
99 save_len - len, val, le32_to_cpu(*image));
100 ret = -EIO;
101 errcnt++;
102 if (errcnt >= 20)
103 break;
104 }
105 }
106
107 if (!errcnt)
108 D_INFO("ucode image in INSTRUCTION memory is good\n");
109
110 return ret;
111}
112
113/**
114 * il4965_verify_ucode - determine which instruction image is in SRAM,
115 * and verify its contents
116 */
117int
118il4965_verify_ucode(struct il_priv *il)
119{
120 __le32 *image;
121 u32 len;
122 int ret;
123
124 /* Try bootstrap */
125 image = (__le32 *) il->ucode_boot.v_addr;
126 len = il->ucode_boot.len;
127 ret = il4965_verify_inst_sparse(il, image, len);
128 if (!ret) {
129 D_INFO("Bootstrap uCode is good in inst SRAM\n");
130 return 0;
131 }
132
133 /* Try initialize */
134 image = (__le32 *) il->ucode_init.v_addr;
135 len = il->ucode_init.len;
136 ret = il4965_verify_inst_sparse(il, image, len);
137 if (!ret) {
138 D_INFO("Initialize uCode is good in inst SRAM\n");
139 return 0;
140 }
141
142 /* Try runtime/protocol */
143 image = (__le32 *) il->ucode_code.v_addr;
144 len = il->ucode_code.len;
145 ret = il4965_verify_inst_sparse(il, image, len);
146 if (!ret) {
147 D_INFO("Runtime uCode is good in inst SRAM\n");
148 return 0;
149 }
150
151 IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
152
153 /* Since nothing seems to match, show first several data entries in
154 * instruction SRAM, so maybe visual inspection will give a clue.
155 * Selection of bootstrap image (vs. other images) is arbitrary. */
156 image = (__le32 *) il->ucode_boot.v_addr;
157 len = il->ucode_boot.len;
158 ret = il4965_verify_inst_full(il, image, len);
159
160 return ret;
161}
162
163/******************************************************************************
164 *
165 * EEPROM related functions
166 *
167******************************************************************************/
168
169/*
170 * The device's EEPROM semaphore prevents conflicts between driver and uCode
171 * when accessing the EEPROM; each access is a series of pulses to/from the
172 * EEPROM chip, not a single event, so even reads could conflict if they
173 * weren't arbitrated by the semaphore.
174 */
175int
176il4965_eeprom_acquire_semaphore(struct il_priv *il)
177{
178 u16 count;
179 int ret;
180
181 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
182 /* Request semaphore */
183 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
184 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
185
186 /* See if we got it */
187 ret =
188 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
189 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
190 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
191 EEPROM_SEM_TIMEOUT);
192 if (ret >= 0)
193 return ret;
194 }
195
196 return ret;
197}
198
199void
200il4965_eeprom_release_semaphore(struct il_priv *il)
201{
202 il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
203 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
204
205}
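
Editor's aside: the two helpers above follow a plain request-then-poll pattern: set the OWN_SEM bit, poll until the hardware lets it stick, retry a bounded number of times, and clear the bit when done. The sketch below captures that shape with a simulated status word instead of CSR registers; every constant and the simulated read-back are illustrative assumptions, not driver or hardware API.

#include <stdint.h>
#include <stdio.h>

#define OWN_SEM_BIT	(1u << 21)	/* illustrative bit position   */
#define SEM_RETRIES	3		/* bounded number of attempts  */
#define SEM_POLL_MAX	50		/* read-back polls per attempt */

static uint32_t fake_csr;	/* simulated CSR status word          */
static int grant_after = 4;	/* device "grants" after a few polls  */

static void csr_request_sem(void)
{
	/* In the driver this is a register write; the grant only shows
	 * up later, when the hardware arbiter lets the bit stick. */
}

static uint32_t csr_read(void)
{
	if (grant_after && --grant_after == 0)
		fake_csr |= OWN_SEM_BIT;
	return fake_csr;
}

static int acquire_semaphore(void)
{
	int attempt, poll;

	for (attempt = 0; attempt < SEM_RETRIES; attempt++) {
		csr_request_sem();
		for (poll = 0; poll < SEM_POLL_MAX; poll++)
			if (csr_read() & OWN_SEM_BIT)
				return 0;	/* we own the EEPROM */
	}
	return -1;				/* never granted */
}

static void release_semaphore(void)
{
	fake_csr &= ~OWN_SEM_BIT;
}

int main(void)
{
	if (acquire_semaphore() == 0) {
		/* ... EEPROM access would go here ... */
		release_semaphore();
		puts("semaphore acquired and released");
	}
	return 0;
}
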
206
207int
208il4965_eeprom_check_version(struct il_priv *il)
209{
210 u16 eeprom_ver;
211 u16 calib_ver;
212
213 eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
214 calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
215
216 if (eeprom_ver < il->cfg->eeprom_ver ||
217 calib_ver < il->cfg->eeprom_calib_ver)
218 goto err;
219
220 IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
221
222 return 0;
223err:
224 IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
225 "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
226 calib_ver, il->cfg->eeprom_calib_ver);
227 return -EINVAL;
228
229}
230
231void
232il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
233{
234 const u8 *addr = il_eeprom_query_addr(il,
235 EEPROM_MAC_ADDRESS);
236 memcpy(mac, addr, ETH_ALEN);
237}
238
239/* Send led command */
240static int
241il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
242{
243 struct il_host_cmd cmd = {
244 .id = C_LEDS,
245 .len = sizeof(struct il_led_cmd),
246 .data = led_cmd,
247 .flags = CMD_ASYNC,
248 .callback = NULL,
249 };
250 u32 reg;
251
252 reg = _il_rd(il, CSR_LED_REG);
253 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
254 _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
255
256 return il_send_cmd(il, &cmd);
257}
258
259/* Set led register off */
260void
261il4965_led_enable(struct il_priv *il)
262{
263 _il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
264}
265
266static int il4965_send_tx_power(struct il_priv *il);
267static int il4965_hw_get_temperature(struct il_priv *il);
268
269/* Highest firmware API version supported */
270#define IL4965_UCODE_API_MAX 2
271
272/* Lowest firmware API version supported */
273#define IL4965_UCODE_API_MIN 2
274
275#define IL4965_FW_PRE "iwlwifi-4965-"
276#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
277#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
278
279/* check contents of special bootstrap uCode SRAM */
280static int
281il4965_verify_bsm(struct il_priv *il)
282{
283 __le32 *image = il->ucode_boot.v_addr;
284 u32 len = il->ucode_boot.len;
285 u32 reg;
286 u32 val;
287
288 D_INFO("Begin verify bsm\n");
289
290 /* verify BSM SRAM contents */
291 val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
292 for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
293 reg += sizeof(u32), image++) {
294 val = il_rd_prph(il, reg);
295 if (val != le32_to_cpu(*image)) {
296 IL_ERR("BSM uCode verification failed at "
297 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
298 BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
299 len, val, le32_to_cpu(*image));
300 return -EIO;
301 }
302 }
303
304 D_INFO("BSM bootstrap uCode image OK\n");
305
306 return 0;
307}
308
309/**
310 * il4965_load_bsm - Load bootstrap instructions
311 *
312 * BSM operation:
313 *
314 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
315 * in special SRAM that does not power down during RFKILL. When powering back
316 * up after power-saving sleeps (or during initial uCode load), the BSM loads
317 * the bootstrap program into the on-board processor, and starts it.
318 *
319 * The bootstrap program loads (via DMA) instructions and data for a new
320 * program from host DRAM locations indicated by the host driver in the
321 * BSM_DRAM_* registers. Once the new program is loaded, it starts
322 * automatically.
323 *
324 * When initializing the NIC, the host driver points the BSM to the
325 * "initialize" uCode image. This uCode sets up some internal data, then
326 * notifies host via "initialize alive" that it is complete.
327 *
328 * The host then replaces the BSM_DRAM_* pointer values to point to the
329 * normal runtime uCode instructions and a backup uCode data cache buffer
330 * (filled initially with starting data values for the on-board processor),
331 * then triggers the "initialize" uCode to load and launch the runtime uCode,
332 * which begins normal operation.
333 *
334 * When doing a power-save shutdown, runtime uCode saves data SRAM into
335 * the backup data cache in DRAM before SRAM is powered down.
336 *
337 * When powering back up, the BSM loads the bootstrap program. This reloads
338 * the runtime uCode instructions and the backup data cache into SRAM,
339 * and re-launches the runtime uCode from where it left off.
340 */
341static int
342il4965_load_bsm(struct il_priv *il)
343{
344 __le32 *image = il->ucode_boot.v_addr;
345 u32 len = il->ucode_boot.len;
346 dma_addr_t pinst;
347 dma_addr_t pdata;
348 u32 inst_len;
349 u32 data_len;
350 int i;
351 u32 done;
352 u32 reg_offset;
353 int ret;
354
355 D_INFO("Begin load bsm\n");
356
357 il->ucode_type = UCODE_RT;
358
359 /* make sure bootstrap program is no larger than BSM's SRAM size */
360 if (len > IL49_MAX_BSM_SIZE)
361 return -EINVAL;
362
363 /* Tell bootstrap uCode where to find the "Initialize" uCode
364 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
365 * NOTE: il_init_alive_start() will replace these values,
366 * after the "initialize" uCode has run, to point to
367 * runtime/protocol instructions and backup data cache.
368 */
369 pinst = il->ucode_init.p_addr >> 4;
370 pdata = il->ucode_init_data.p_addr >> 4;
371 inst_len = il->ucode_init.len;
372 data_len = il->ucode_init_data.len;
373
374 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
375 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
376 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
377 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
378
379 /* Fill BSM memory with bootstrap instructions */
380 for (reg_offset = BSM_SRAM_LOWER_BOUND;
381 reg_offset < BSM_SRAM_LOWER_BOUND + len;
382 reg_offset += sizeof(u32), image++)
383 _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
384
385 ret = il4965_verify_bsm(il);
386 if (ret)
387 return ret;
388
389 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
390 il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
391 il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
392 il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
393
394 /* Load bootstrap code into instruction SRAM now,
395 * to prepare to load "initialize" uCode */
396 il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
397
398 /* Wait for load of bootstrap uCode to finish */
399 for (i = 0; i < 100; i++) {
400 done = il_rd_prph(il, BSM_WR_CTRL_REG);
401 if (!(done & BSM_WR_CTRL_REG_BIT_START))
402 break;
403 udelay(10);
404 }
405 if (i < 100)
406 D_INFO("BSM write complete, poll %d iterations\n", i);
407 else {
408 IL_ERR("BSM write did not complete!\n");
409 return -EIO;
410 }
411
412 /* Enable future boot loads whenever power management unit triggers it
413 * (e.g. when powering back up after power-save shutdown) */
414 il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
415
416 return 0;
417}
418
419/**
420 * il4965_set_ucode_ptrs - Set uCode address location
421 *
422 * Tell initialization uCode where to find runtime uCode.
423 *
424 * BSM registers initially contain pointers to initialization uCode.
425 * We need to replace them to load runtime uCode inst and data,
426 * and to save runtime data when powering down.
427 */
428static int
429il4965_set_ucode_ptrs(struct il_priv *il)
430{
431 dma_addr_t pinst;
432 dma_addr_t pdata;
433 int ret = 0;
434
435 /* bits 35:4 for 4965 */
436 pinst = il->ucode_code.p_addr >> 4;
437 pdata = il->ucode_data_backup.p_addr >> 4;
438
439 /* Tell bootstrap uCode where to find image to load */
440 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
441 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
442 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
443
444 /* Inst byte count must be last to set up, bit 31 signals uCode
445 * that all new ptr/size info is in place */
446 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
447 il->ucode_code.len | BSM_DRAM_INST_LOAD);
448 D_INFO("Runtime uCode pointers are set.\n");
449
450 return ret;
451}
452
453/**
454 * il4965_init_alive_start - Called after N_ALIVE notification received
455 *
456 * Called after N_ALIVE notification received from "initialize" uCode.
457 *
458 * The 4965 "initialize" ALIVE reply contains calibration data for:
459 * Voltage, temperature, and MIMO tx gain correction, now stored in il
460 * (3945 does not contain this data).
461 *
462 * Tell "initialize" uCode to go ahead and load the runtime uCode.
463*/
464static void
465il4965_init_alive_start(struct il_priv *il)
466{
467 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
468 * This is a paranoid check, because we would not have gotten the
469 * "initialize" alive if code weren't properly loaded. */
470 if (il4965_verify_ucode(il)) {
471 /* Runtime instruction load was bad;
472 * take it all the way back down so we can try again */
473 D_INFO("Bad \"initialize\" uCode load.\n");
474 goto restart;
475 }
476
477 /* Calculate temperature */
478 il->temperature = il4965_hw_get_temperature(il);
479
480 /* Send pointers to protocol/runtime uCode image ... init code will
481 * load and launch runtime uCode, which will send us another "Alive"
482 * notification. */
483 D_INFO("Initialization Alive received.\n");
484 if (il4965_set_ucode_ptrs(il)) {
485 /* Runtime instruction load won't happen;
486 * take it all the way back down so we can try again */
487 D_INFO("Couldn't set up uCode pointers.\n");
488 goto restart;
489 }
490 return;
491
492restart:
493 queue_work(il->workqueue, &il->restart);
494}
495
496static bool
497iw4965_is_ht40_channel(__le32 rxon_flags)
498{
499 int chan_mod =
500 le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
501 RXON_FLG_CHANNEL_MODE_POS;
502 return (chan_mod == CHANNEL_MODE_PURE_40 ||
503 chan_mod == CHANNEL_MODE_MIXED);
504}
505
506void
507il4965_nic_config(struct il_priv *il)
508{
509 unsigned long flags;
510 u16 radio_cfg;
511
512 spin_lock_irqsave(&il->lock, flags);
513
514 radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);
515
516 /* write radio config values to register */
517 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
518 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
519 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
520 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
521 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
522
523 /* set CSR_HW_CONFIG_REG for uCode use */
524 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
525 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
526 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
527
528 il->calib_info =
529 (struct il_eeprom_calib_info *)
530 il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);
531
532 spin_unlock_irqrestore(&il->lock, flags);
533}
534
535/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
536 * Called after every association, but this runs only once!
537 * ... once chain noise is calibrated the first time, it's good forever. */
538static void
539il4965_chain_noise_reset(struct il_priv *il)
540{
541 struct il_chain_noise_data *data = &(il->chain_noise_data);
542
543 if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
544 struct il_calib_diff_gain_cmd cmd;
545
546 /* clear data for chain noise calibration algorithm */
547 data->chain_noise_a = 0;
548 data->chain_noise_b = 0;
549 data->chain_noise_c = 0;
550 data->chain_signal_a = 0;
551 data->chain_signal_b = 0;
552 data->chain_signal_c = 0;
553 data->beacon_count = 0;
554
555 memset(&cmd, 0, sizeof(cmd));
556 cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
557 cmd.diff_gain_a = 0;
558 cmd.diff_gain_b = 0;
559 cmd.diff_gain_c = 0;
560 if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
561 IL_ERR("Could not send C_PHY_CALIBRATION\n");
562 data->state = IL_CHAIN_NOISE_ACCUMULATE;
563 D_CALIB("Run chain_noise_calibrate\n");
564 }
565}
566
567static s32
568il4965_math_div_round(s32 num, s32 denom, s32 * res)
569{
570 s32 sign = 1;
571
572 if (num < 0) {
573 sign = -sign;
574 num = -num;
575 }
576 if (denom < 0) {
577 sign = -sign;
578 denom = -denom;
579 }
580 *res = 1;
581 *res = ((num * 2 + denom) / (denom * 2)) * sign;
582
583 return 1;
584}
585
586/**
587 * il4965_get_voltage_compensation - Power supply voltage comp for txpower
588 *
589 * Determines power supply voltage compensation for txpower calculations.
590 * Returns number of 1/2-dB steps to subtract from gain table idx,
591 * to compensate for difference between power supply voltage during
592 * factory measurements, vs. current power supply voltage.
593 *
594 * Voltage indication is higher for lower voltage.
595 * Lower voltage requires more gain (lower gain table idx).
596 */
597static s32
598il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
599{
600 s32 comp = 0;
601
602 if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
603 TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
604 return 0;
605
606 il4965_math_div_round(current_voltage - eeprom_voltage,
607 TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
608
609 if (current_voltage > eeprom_voltage)
610 comp *= 2;
611 if ((comp < -2) || (comp > 2))
612 comp = 0;
613
614 return comp;
615}
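
Editor's aside: to make the comment above concrete, this sketch runs the same arithmetic with made-up numbers. The step size per 0.3 V (7 codes here) and the function name are assumptions chosen only for the example, not a claim about the EEPROM format; only the ordering of the operations mirrors the driver.

#include <stdio.h>
#include <stdlib.h>

/* Assumed step size: voltage-indicator codes per 0.3 V. */
#define CODES_PER_03V 7

/*
 * Half-dB steps to subtract from the gain-table index: round the code
 * difference to a step count, double it when the current indication
 * exceeds the factory one, and ignore anything outside +/-2 steps.
 */
static int voltage_compensation(int eeprom_code, int current_code)
{
	int diff = current_code - eeprom_code;
	int comp = (abs(diff) * 2 + CODES_PER_03V) / (CODES_PER_03V * 2);

	if (diff < 0)
		comp = -comp;		/* keep the sign of the difference */
	if (current_code > eeprom_code)
		comp *= 2;
	if (comp < -2 || comp > 2)
		comp = 0;
	return comp;
}

int main(void)
{
	printf("%d\n", voltage_compensation(340, 347));	/* one step higher -> 2  */
	printf("%d\n", voltage_compensation(340, 333));	/* one step lower  -> -1 */
	return 0;
}
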
616
617static s32
618il4965_get_tx_atten_grp(u16 channel)
619{
620 if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
621 channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
622 return CALIB_CH_GROUP_5;
623
624 if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
625 channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
626 return CALIB_CH_GROUP_1;
627
628 if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
629 channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
630 return CALIB_CH_GROUP_2;
631
632 if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
633 channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
634 return CALIB_CH_GROUP_3;
635
636 if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
637 channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
638 return CALIB_CH_GROUP_4;
639
640 return -EINVAL;
641}
642
643static u32
644il4965_get_sub_band(const struct il_priv *il, u32 channel)
645{
646 s32 b = -1;
647
648 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
649 if (il->calib_info->band_info[b].ch_from == 0)
650 continue;
651
652 if (channel >= il->calib_info->band_info[b].ch_from &&
653 channel <= il->calib_info->band_info[b].ch_to)
654 break;
655 }
656
657 return b;
658}
659
660static s32
661il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
662{
663 s32 val;
664
665 if (x2 == x1)
666 return y1;
667 else {
668 il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
669 return val + y2;
670 }
671}
672
673/**
674 * il4965_interpolate_chan - Interpolate factory measurements for one channel
675 *
676 * Interpolates factory measurements from the two sample channels within a
677 * sub-band, to apply to channel of interest. Interpolation is proportional to
678 * differences in channel frequencies, which is proportional to differences
679 * in channel number.
680 */
681static int
682il4965_interpolate_chan(struct il_priv *il, u32 channel,
683 struct il_eeprom_calib_ch_info *chan_info)
684{
685 s32 s = -1;
686 u32 c;
687 u32 m;
688 const struct il_eeprom_calib_measure *m1;
689 const struct il_eeprom_calib_measure *m2;
690 struct il_eeprom_calib_measure *omeas;
691 u32 ch_i1;
692 u32 ch_i2;
693
694 s = il4965_get_sub_band(il, channel);
695 if (s >= EEPROM_TX_POWER_BANDS) {
696 IL_ERR("Tx Power can not find channel %d\n", channel);
697 return -1;
698 }
699
700 ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
701 ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
702 chan_info->ch_num = (u8) channel;
703
704 D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
705 ch_i1, ch_i2);
706
707 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
708 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
709 m1 = &(il->calib_info->band_info[s].ch1.
710 measurements[c][m]);
711 m2 = &(il->calib_info->band_info[s].ch2.
712 measurements[c][m]);
713 omeas = &(chan_info->measurements[c][m]);
714
715 omeas->actual_pow =
716 (u8) il4965_interpolate_value(channel, ch_i1,
717 m1->actual_pow, ch_i2,
718 m2->actual_pow);
719 omeas->gain_idx =
720 (u8) il4965_interpolate_value(channel, ch_i1,
721 m1->gain_idx, ch_i2,
722 m2->gain_idx);
723 omeas->temperature =
724 (u8) il4965_interpolate_value(channel, ch_i1,
725 m1->temperature,
726 ch_i2,
727 m2->temperature);
728 omeas->pa_det =
729 (s8) il4965_interpolate_value(channel, ch_i1,
730 m1->pa_det, ch_i2,
731 m2->pa_det);
732
733 D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
734 m, m1->actual_pow, m2->actual_pow,
735 omeas->actual_pow);
736 D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
737 m, m1->gain_idx, m2->gain_idx,
738 omeas->gain_idx);
739 D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
740 m, m1->pa_det, m2->pa_det, omeas->pa_det);
741 D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c,
742 m, m1->temperature, m2->temperature,
743 omeas->temperature);
744 }
745 }
746
747 return 0;
748}
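
Editor's aside: il4965_interpolate_chan() above is plain linear interpolation between the two factory-calibrated sample channels of a sub-band, using the rounded division helper further up. The sketch below reproduces that computation with invented channel numbers and gain indices, purely to show the proportional weighting applied per chain and per measurement.

#include <stdio.h>

/* Round-to-nearest signed division, same formula as the driver helper. */
static int div_round(int num, int denom)
{
	int sign = (num < 0) ^ (denom < 0) ? -1 : 1;

	num = num < 0 ? -num : num;
	denom = denom < 0 ? -denom : denom;
	return sign * ((num * 2 + denom) / (denom * 2));
}

/* Interpolate y at x from calibration points (x1,y1) and (x2,y2). */
static int interpolate(int x, int x1, int y1, int x2, int y2)
{
	if (x2 == x1)
		return y1;
	return div_round((x2 - x) * (y1 - y2), x2 - x1) + y2;
}

int main(void)
{
	/*
	 * Hypothetical sub-band calibrated at channels 36 and 64:
	 * gain index 80 at ch 36, 72 at ch 64; ask for channel 44.
	 */
	printf("gain idx at ch 44: %d\n", interpolate(44, 36, 80, 64, 72)); /* -> 78 */
	return 0;
}
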
749
750/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
751 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
752static s32 back_off_table[] = {
753 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
754 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
755 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
756 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
757 10 /* CCK */
758};
759
760/* Thermal compensation values for txpower for various frequency ranges ...
761 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
762static struct il4965_txpower_comp_entry {
763 s32 degrees_per_05db_a;
764 s32 degrees_per_05db_a_denom;
765} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
766 {
767 9, 2}, /* group 0 5.2, ch 34-43 */
768 {
769 4, 1}, /* group 1 5.2, ch 44-70 */
770 {
771 4, 1}, /* group 2 5.2, ch 71-124 */
772 {
773 4, 1}, /* group 3 5.2, ch 125-200 */
774 {
775 3, 1} /* group 4 2.4, ch all */
776};
777
778static s32
779get_min_power_idx(s32 rate_power_idx, u32 band)
780{
781 if (!band) {
782 if ((rate_power_idx & 7) <= 4)
783 return MIN_TX_GAIN_IDX_52GHZ_EXT;
784 }
785 return MIN_TX_GAIN_IDX;
786}
787
788struct gain_entry {
789 u8 dsp;
790 u8 radio;
791};
792
793static const struct gain_entry gain_table[2][108] = {
794 /* 5.2GHz power gain idx table */
795 {
796 {123, 0x3F}, /* highest txpower */
797 {117, 0x3F},
798 {110, 0x3F},
799 {104, 0x3F},
800 {98, 0x3F},
801 {110, 0x3E},
802 {104, 0x3E},
803 {98, 0x3E},
804 {110, 0x3D},
805 {104, 0x3D},
806 {98, 0x3D},
807 {110, 0x3C},
808 {104, 0x3C},
809 {98, 0x3C},
810 {110, 0x3B},
811 {104, 0x3B},
812 {98, 0x3B},
813 {110, 0x3A},
814 {104, 0x3A},
815 {98, 0x3A},
816 {110, 0x39},
817 {104, 0x39},
818 {98, 0x39},
819 {110, 0x38},
820 {104, 0x38},
821 {98, 0x38},
822 {110, 0x37},
823 {104, 0x37},
824 {98, 0x37},
825 {110, 0x36},
826 {104, 0x36},
827 {98, 0x36},
828 {110, 0x35},
829 {104, 0x35},
830 {98, 0x35},
831 {110, 0x34},
832 {104, 0x34},
833 {98, 0x34},
834 {110, 0x33},
835 {104, 0x33},
836 {98, 0x33},
837 {110, 0x32},
838 {104, 0x32},
839 {98, 0x32},
840 {110, 0x31},
841 {104, 0x31},
842 {98, 0x31},
843 {110, 0x30},
844 {104, 0x30},
845 {98, 0x30},
846 {110, 0x25},
847 {104, 0x25},
848 {98, 0x25},
849 {110, 0x24},
850 {104, 0x24},
851 {98, 0x24},
852 {110, 0x23},
853 {104, 0x23},
854 {98, 0x23},
855 {110, 0x22},
856 {104, 0x18},
857 {98, 0x18},
858 {110, 0x17},
859 {104, 0x17},
860 {98, 0x17},
861 {110, 0x16},
862 {104, 0x16},
863 {98, 0x16},
864 {110, 0x15},
865 {104, 0x15},
866 {98, 0x15},
867 {110, 0x14},
868 {104, 0x14},
869 {98, 0x14},
870 {110, 0x13},
871 {104, 0x13},
872 {98, 0x13},
873 {110, 0x12},
874 {104, 0x08},
875 {98, 0x08},
876 {110, 0x07},
877 {104, 0x07},
878 {98, 0x07},
879 {110, 0x06},
880 {104, 0x06},
881 {98, 0x06},
882 {110, 0x05},
883 {104, 0x05},
884 {98, 0x05},
885 {110, 0x04},
886 {104, 0x04},
887 {98, 0x04},
888 {110, 0x03},
889 {104, 0x03},
890 {98, 0x03},
891 {110, 0x02},
892 {104, 0x02},
893 {98, 0x02},
894 {110, 0x01},
895 {104, 0x01},
896 {98, 0x01},
897 {110, 0x00},
898 {104, 0x00},
899 {98, 0x00},
900 {93, 0x00},
901 {88, 0x00},
902 {83, 0x00},
903 {78, 0x00},
904 },
905 /* 2.4GHz power gain idx table */
906 {
907 {110, 0x3f}, /* highest txpower */
908 {104, 0x3f},
909 {98, 0x3f},
910 {110, 0x3e},
911 {104, 0x3e},
912 {98, 0x3e},
913 {110, 0x3d},
914 {104, 0x3d},
915 {98, 0x3d},
916 {110, 0x3c},
917 {104, 0x3c},
918 {98, 0x3c},
919 {110, 0x3b},
920 {104, 0x3b},
921 {98, 0x3b},
922 {110, 0x3a},
923 {104, 0x3a},
924 {98, 0x3a},
925 {110, 0x39},
926 {104, 0x39},
927 {98, 0x39},
928 {110, 0x38},
929 {104, 0x38},
930 {98, 0x38},
931 {110, 0x37},
932 {104, 0x37},
933 {98, 0x37},
934 {110, 0x36},
935 {104, 0x36},
936 {98, 0x36},
937 {110, 0x35},
938 {104, 0x35},
939 {98, 0x35},
940 {110, 0x34},
941 {104, 0x34},
942 {98, 0x34},
943 {110, 0x33},
944 {104, 0x33},
945 {98, 0x33},
946 {110, 0x32},
947 {104, 0x32},
948 {98, 0x32},
949 {110, 0x31},
950 {104, 0x31},
951 {98, 0x31},
952 {110, 0x30},
953 {104, 0x30},
954 {98, 0x30},
955 {110, 0x6},
956 {104, 0x6},
957 {98, 0x6},
958 {110, 0x5},
959 {104, 0x5},
960 {98, 0x5},
961 {110, 0x4},
962 {104, 0x4},
963 {98, 0x4},
964 {110, 0x3},
965 {104, 0x3},
966 {98, 0x3},
967 {110, 0x2},
968 {104, 0x2},
969 {98, 0x2},
970 {110, 0x1},
971 {104, 0x1},
972 {98, 0x1},
973 {110, 0x0},
974 {104, 0x0},
975 {98, 0x0},
976 {97, 0},
977 {96, 0},
978 {95, 0},
979 {94, 0},
980 {93, 0},
981 {92, 0},
982 {91, 0},
983 {90, 0},
984 {89, 0},
985 {88, 0},
986 {87, 0},
987 {86, 0},
988 {85, 0},
989 {84, 0},
990 {83, 0},
991 {82, 0},
992 {81, 0},
993 {80, 0},
994 {79, 0},
995 {78, 0},
996 {77, 0},
997 {76, 0},
998 {75, 0},
999 {74, 0},
1000 {73, 0},
1001 {72, 0},
1002 {71, 0},
1003 {70, 0},
1004 {69, 0},
1005 {68, 0},
1006 {67, 0},
1007 {66, 0},
1008 {65, 0},
1009 {64, 0},
1010 {63, 0},
1011 {62, 0},
1012 {61, 0},
1013 {60, 0},
1014 {59, 0},
1015 }
1016};
1017
1018static int
1019il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
1020 u8 ctrl_chan_high,
1021 struct il4965_tx_power_db *tx_power_tbl)
1022{
1023 u8 saturation_power;
1024 s32 target_power;
1025 s32 user_target_power;
1026 s32 power_limit;
1027 s32 current_temp;
1028 s32 reg_limit;
1029 s32 current_regulatory;
1030 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1031 int i;
1032 int c;
1033 const struct il_channel_info *ch_info = NULL;
1034 struct il_eeprom_calib_ch_info ch_eeprom_info;
1035 const struct il_eeprom_calib_measure *measurement;
1036 s16 voltage;
1037 s32 init_voltage;
1038 s32 voltage_compensation;
1039 s32 degrees_per_05db_num;
1040 s32 degrees_per_05db_denom;
1041 s32 factory_temp;
1042 s32 temperature_comp[2];
1043 s32 factory_gain_idx[2];
1044 s32 factory_actual_pwr[2];
1045 s32 power_idx;
1046
1047 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
1048 * are used for idxing into txpower table) */
1049 user_target_power = 2 * il->tx_power_user_lmt;
1050
1051 /* Get current (RXON) channel, band, width */
1052 D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
1053
1054 ch_info = il_get_channel_info(il, il->band, channel);
1055
1056 if (!il_is_channel_valid(ch_info))
1057 return -EINVAL;
1058
1059 /* get txatten group, used to select 1) thermal txpower adjustment
1060 * and 2) mimo txpower balance between Tx chains. */
1061 txatten_grp = il4965_get_tx_atten_grp(channel);
1062 if (txatten_grp < 0) {
1063 IL_ERR("Can't find txatten group for channel %d.\n", channel);
1064 return txatten_grp;
1065 }
1066
1067 D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
1068 txatten_grp);
1069
1070 if (is_ht40) {
1071 if (ctrl_chan_high)
1072 channel -= 2;
1073 else
1074 channel += 2;
1075 }
1076
1077 /* hardware txpower limits ...
1078 * saturation (clipping distortion) txpowers are in half-dBm */
1079 if (band)
1080 saturation_power = il->calib_info->saturation_power24;
1081 else
1082 saturation_power = il->calib_info->saturation_power52;
1083
1084 if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
1085 saturation_power > IL_TX_POWER_SATURATION_MAX) {
1086 if (band)
1087 saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
1088 else
1089 saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
1090 }
1091
1092 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1093 * max_power_avg values are in dBm, convert * 2 */
1094 if (is_ht40)
1095 reg_limit = ch_info->ht40_max_power_avg * 2;
1096 else
1097 reg_limit = ch_info->max_power_avg * 2;
1098
1099 if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
1100 (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
1101 if (band)
1102 reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
1103 else
1104 reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
1105 }
1106
1107 /* Interpolate txpower calibration values for this channel,
1108 * based on factory calibration tests on spaced channels. */
1109 il4965_interpolate_chan(il, channel, &ch_eeprom_info);
1110
1111 /* calculate tx gain adjustment based on power supply voltage */
1112 voltage = le16_to_cpu(il->calib_info->voltage);
1113 init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
1114 voltage_compensation =
1115 il4965_get_voltage_compensation(voltage, init_voltage);
1116
1117 D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
1118 voltage, voltage_compensation);
1119
1120 /* get current temperature (Celsius) */
1121 current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
 1122	current_temp = min(current_temp, IL_TX_POWER_TEMPERATURE_MAX);
1123 current_temp = KELVIN_TO_CELSIUS(current_temp);
1124
1125 /* select thermal txpower adjustment params, based on channel group
1126 * (same frequency group used for mimo txatten adjustment) */
1127 degrees_per_05db_num =
1128 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1129 degrees_per_05db_denom =
1130 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1131
1132 /* get per-chain txpower values from factory measurements */
1133 for (c = 0; c < 2; c++) {
1134 measurement = &ch_eeprom_info.measurements[c][1];
1135
1136 /* txgain adjustment (in half-dB steps) based on difference
1137 * between factory and current temperature */
1138 factory_temp = measurement->temperature;
1139 il4965_math_div_round((current_temp -
1140 factory_temp) * degrees_per_05db_denom,
1141 degrees_per_05db_num,
1142 &temperature_comp[c]);
1143
1144 factory_gain_idx[c] = measurement->gain_idx;
1145 factory_actual_pwr[c] = measurement->actual_pow;
1146
1147 D_TXPOWER("chain = %d\n", c);
1148 D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
1149 factory_temp, current_temp, temperature_comp[c]);
1150
1151 D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
1152 factory_actual_pwr[c]);
1153 }
1154
1155 /* for each of 33 bit-rates (including 1 for CCK) */
1156 for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
1157 u8 is_mimo_rate;
1158 union il4965_tx_power_dual_stream tx_power;
1159
1160 /* for mimo, reduce each chain's txpower by half
1161 * (3dB, 6 steps), so total output power is regulatory
1162 * compliant. */
1163 if (i & 0x8) {
1164 current_regulatory =
1165 reg_limit -
1166 IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1167 is_mimo_rate = 1;
1168 } else {
1169 current_regulatory = reg_limit;
1170 is_mimo_rate = 0;
1171 }
1172
1173 /* find txpower limit, either hardware or regulatory */
1174 power_limit = saturation_power - back_off_table[i];
1175 if (power_limit > current_regulatory)
1176 power_limit = current_regulatory;
1177
1178 /* reduce user's txpower request if necessary
1179 * for this rate on this channel */
1180 target_power = user_target_power;
1181 if (target_power > power_limit)
1182 target_power = power_limit;
1183
1184 D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
1185 saturation_power - back_off_table[i],
1186 current_regulatory, user_target_power, target_power);
1187
1188 /* for each of 2 Tx chains (radio transmitters) */
1189 for (c = 0; c < 2; c++) {
1190 s32 atten_value;
1191
1192 if (is_mimo_rate)
1193 atten_value =
1194 (s32) le32_to_cpu(il->card_alive_init.
1195 tx_atten[txatten_grp][c]);
1196 else
1197 atten_value = 0;
1198
1199 /* calculate idx; higher idx means lower txpower */
1200 power_idx =
1201 (u8) (factory_gain_idx[c] -
1202 (target_power - factory_actual_pwr[c]) -
1203 temperature_comp[c] - voltage_compensation +
1204 atten_value);
1205
1206/* D_TXPOWER("calculated txpower idx %d\n",
1207 power_idx); */
1208
1209 if (power_idx < get_min_power_idx(i, band))
1210 power_idx = get_min_power_idx(i, band);
1211
1212 /* adjust 5 GHz idx to support negative idxes */
1213 if (!band)
1214 power_idx += 9;
1215
1216 /* CCK, rate 32, reduce txpower for CCK */
1217 if (i == POWER_TBL_CCK_ENTRY)
1218 power_idx +=
1219 IL_TX_POWER_CCK_COMPENSATION_C_STEP;
1220
1221 /* stay within the table! */
1222 if (power_idx > 107) {
1223 IL_WARN("txpower idx %d > 107\n", power_idx);
1224 power_idx = 107;
1225 }
1226 if (power_idx < 0) {
1227 IL_WARN("txpower idx %d < 0\n", power_idx);
1228 power_idx = 0;
1229 }
1230
1231 /* fill txpower command for this rate/chain */
1232 tx_power.s.radio_tx_gain[c] =
1233 gain_table[band][power_idx].radio;
1234 tx_power.s.dsp_predis_atten[c] =
1235 gain_table[band][power_idx].dsp;
1236
1237 D_TXPOWER("chain %d mimo %d idx %d "
1238 "gain 0x%02x dsp %d\n", c, atten_value,
1239 power_idx, tx_power.s.radio_tx_gain[c],
1240 tx_power.s.dsp_predis_atten[c]);
1241 } /* for each chain */
1242
1243 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1244
1245 } /* for each rate */
1246
1247 return 0;
1248}
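
Editor's aside: the per-rate, per-chain loop above boils down to one integer index calculation followed by clamping into the 108-entry gain table. The fragment below isolates that core arithmetic with invented calibration numbers; the constant, the function name and the sample values are placeholders, and the band-specific adjustments (minimum-index floor, 5 GHz offset, CCK step) are deliberately left out.

#include <stdio.h>

#define GAIN_TABLE_LAST	107	/* last valid row of the gain table */

/*
 * Higher index means lower transmit power. Start from the factory gain
 * index, back off by how far the target sits below the factory
 * measurement, apply temperature and voltage corrections, add any MIMO
 * attenuation, then clamp into the table.
 */
static int tx_power_index(int factory_gain_idx, int factory_actual_pwr,
			  int target_power, int temp_comp, int volt_comp,
			  int atten)
{
	int idx = factory_gain_idx - (target_power - factory_actual_pwr) -
		  temp_comp - volt_comp + atten;

	if (idx < 0)
		idx = 0;
	if (idx > GAIN_TABLE_LAST)
		idx = GAIN_TABLE_LAST;
	return idx;
}

int main(void)
{
	/* Made-up calibration numbers, all in half-dB units. */
	printf("idx = %d\n", tx_power_index(78, 24, 30, 1, 0, 0)); /* -> 71 */
	return 0;
}
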
1249
1250/**
1251 * il4965_send_tx_power - Configure the TXPOWER level user limit
1252 *
1253 * Uses the active RXON for channel, band, and characteristics (ht40, high)
1254 * The power limit is taken from il->tx_power_user_lmt.
1255 */
1256static int
1257il4965_send_tx_power(struct il_priv *il)
1258{
1259 struct il4965_txpowertable_cmd cmd = { 0 };
1260 int ret;
1261 u8 band = 0;
1262 bool is_ht40 = false;
1263 u8 ctrl_chan_high = 0;
1264
1265 if (WARN_ONCE
1266 (test_bit(S_SCAN_HW, &il->status),
1267 "TX Power requested while scanning!\n"))
1268 return -EAGAIN;
1269
1270 band = il->band == IEEE80211_BAND_2GHZ;
1271
1272 is_ht40 = iw4965_is_ht40_channel(il->active.flags);
1273
1274 if (is_ht40 && (il->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1275 ctrl_chan_high = 1;
1276
1277 cmd.band = band;
1278 cmd.channel = il->active.channel;
1279
1280 ret =
1281 il4965_fill_txpower_tbl(il, band, le16_to_cpu(il->active.channel),
1282 is_ht40, ctrl_chan_high, &cmd.tx_power);
1283 if (ret)
1284 goto out;
1285
1286 ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
1287
1288out:
1289 return ret;
1290}
1291
1292static int
1293il4965_send_rxon_assoc(struct il_priv *il)
1294{
1295 int ret = 0;
1296 struct il4965_rxon_assoc_cmd rxon_assoc;
1297 const struct il_rxon_cmd *rxon1 = &il->staging;
1298 const struct il_rxon_cmd *rxon2 = &il->active;
1299
1300 if (rxon1->flags == rxon2->flags &&
1301 rxon1->filter_flags == rxon2->filter_flags &&
1302 rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
1303 rxon1->ofdm_ht_single_stream_basic_rates ==
1304 rxon2->ofdm_ht_single_stream_basic_rates &&
1305 rxon1->ofdm_ht_dual_stream_basic_rates ==
1306 rxon2->ofdm_ht_dual_stream_basic_rates &&
1307 rxon1->rx_chain == rxon2->rx_chain &&
1308 rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
1309 D_INFO("Using current RXON_ASSOC. Not resending.\n");
1310 return 0;
1311 }
1312
1313 rxon_assoc.flags = il->staging.flags;
1314 rxon_assoc.filter_flags = il->staging.filter_flags;
1315 rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates;
1316 rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates;
1317 rxon_assoc.reserved = 0;
1318 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1319 il->staging.ofdm_ht_single_stream_basic_rates;
1320 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1321 il->staging.ofdm_ht_dual_stream_basic_rates;
1322 rxon_assoc.rx_chain_select_flags = il->staging.rx_chain;
1323
1324 ret =
1325 il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
1326 &rxon_assoc, NULL);
1327
1328 return ret;
1329}
1330
1331static int
1332il4965_commit_rxon(struct il_priv *il)
1333{
1334 /* cast away the const for active_rxon in this function */
1335 struct il_rxon_cmd *active_rxon = (void *)&il->active;
1336 int ret;
1337 bool new_assoc = !!(il->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1338
1339 if (!il_is_alive(il))
1340 return -EBUSY;
1341
1342 /* always get timestamp with Rx frame */
1343 il->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1344
1345 ret = il_check_rxon_cmd(il);
1346 if (ret) {
1347 IL_ERR("Invalid RXON configuration. Not committing.\n");
1348 return -EINVAL;
1349 }
1350
1351 /*
1352 * receive commit_rxon request
1353 * abort any previous channel switch if still in process
1354 */
1355 if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
1356 il->switch_channel != il->staging.channel) {
1357 D_11H("abort channel switch on %d\n",
1358 le16_to_cpu(il->switch_channel));
1359 il_chswitch_done(il, false);
1360 }
1361
1362 /* If we don't need to send a full RXON, we can use
1363 * il_rxon_assoc_cmd which is used to reconfigure filter
1364 * and other flags for the current radio configuration. */
1365 if (!il_full_rxon_required(il)) {
1366 ret = il_send_rxon_assoc(il);
1367 if (ret) {
1368 IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
1369 return ret;
1370 }
1371
1372 memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
1373 il_print_rx_config_cmd(il);
1374 /*
1375 * We do not commit tx power settings while channel changing,
1376 * do it now if tx power changed.
1377 */
1378 il_set_tx_power(il, il->tx_power_next, false);
1379 return 0;
1380 }
1381
1382 /* If we are currently associated and the new config requires
1383 * an RXON_ASSOC and the new config wants the associated mask enabled,
1384 * we must clear the associated from the active configuration
1385 * before we apply the new config */
1386 if (il_is_associated(il) && new_assoc) {
1387 D_INFO("Toggling associated bit on current RXON\n");
1388 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1389
1390 ret =
1391 il_send_cmd_pdu(il, C_RXON,
1392 sizeof(struct il_rxon_cmd), active_rxon);
1393
1394 /* If the mask clearing failed then we set
1395 * active_rxon back to what it was previously */
1396 if (ret) {
1397 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1398 IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
1399 return ret;
1400 }
1401 il_clear_ucode_stations(il);
1402 il_restore_stations(il);
1403 ret = il4965_restore_default_wep_keys(il);
1404 if (ret) {
1405 IL_ERR("Failed to restore WEP keys (%d)\n", ret);
1406 return ret;
1407 }
1408 }
1409
1410 D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
1411 "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
1412 le16_to_cpu(il->staging.channel), il->staging.bssid_addr);
1413
1414 il_set_rxon_hwcrypto(il, !il->cfg->mod_params->sw_crypto);
1415
1416 /* Apply the new configuration
1417 * RXON unassoc clears the station table in uCode so restoration of
1418 * stations is needed after it (the RXON command) completes
1419 */
1420 if (!new_assoc) {
1421 ret =
1422 il_send_cmd_pdu(il, C_RXON,
1423 sizeof(struct il_rxon_cmd), &il->staging);
1424 if (ret) {
1425 IL_ERR("Error setting new RXON (%d)\n", ret);
1426 return ret;
1427 }
1428 D_INFO("Return from !new_assoc RXON.\n");
1429 memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
1430 il_clear_ucode_stations(il);
1431 il_restore_stations(il);
1432 ret = il4965_restore_default_wep_keys(il);
1433 if (ret) {
1434 IL_ERR("Failed to restore WEP keys (%d)\n", ret);
1435 return ret;
1436 }
1437 }
1438 if (new_assoc) {
1439 il->start_calib = 0;
1440 /* Apply the new configuration
1441 * RXON assoc doesn't clear the station table in uCode,
1442 */
1443 ret =
1444 il_send_cmd_pdu(il, C_RXON,
1445 sizeof(struct il_rxon_cmd), &il->staging);
1446 if (ret) {
1447 IL_ERR("Error setting new RXON (%d)\n", ret);
1448 return ret;
1449 }
1450 memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
1451 }
1452 il_print_rx_config_cmd(il);
1453
1454 il4965_init_sensitivity(il);
1455
1456 /* If we issue a new RXON command which required a tune then we must
1457 * send a new TXPOWER command or we won't be able to Tx any frames */
1458 ret = il_set_tx_power(il, il->tx_power_next, true);
1459 if (ret) {
1460 IL_ERR("Error sending TX power (%d)\n", ret);
1461 return ret;
1462 }
1463
1464 return 0;
1465}
1466
1467static int
1468il4965_hw_channel_switch(struct il_priv *il,
1469 struct ieee80211_channel_switch *ch_switch)
1470{
1471 int rc;
1472 u8 band = 0;
1473 bool is_ht40 = false;
1474 u8 ctrl_chan_high = 0;
1475 struct il4965_channel_switch_cmd cmd;
1476 const struct il_channel_info *ch_info;
1477 u32 switch_time_in_usec, ucode_switch_time;
1478 u16 ch;
1479 u32 tsf_low;
1480 u8 switch_count;
1481 u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval);
1482 struct ieee80211_vif *vif = il->vif;
1483 band = (il->band == IEEE80211_BAND_2GHZ);
1484
1485 if (WARN_ON_ONCE(vif == NULL))
1486 return -EIO;
1487
1488 is_ht40 = iw4965_is_ht40_channel(il->staging.flags);
1489
1490 if (is_ht40 && (il->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1491 ctrl_chan_high = 1;
1492
1493 cmd.band = band;
1494 cmd.expect_beacon = 0;
1495 ch = ch_switch->chandef.chan->hw_value;
1496 cmd.channel = cpu_to_le16(ch);
1497 cmd.rxon_flags = il->staging.flags;
1498 cmd.rxon_filter_flags = il->staging.filter_flags;
1499 switch_count = ch_switch->count;
1500 tsf_low = ch_switch->timestamp & 0x0ffffffff;
1501 /*
1502 * calculate the ucode channel switch time
1503 * adding TSF as one of the factor for when to switch
1504 */
1505 if (il->ucode_beacon_time > tsf_low && beacon_interval) {
1506 if (switch_count >
1507 ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
1508 switch_count -=
1509 (il->ucode_beacon_time - tsf_low) / beacon_interval;
1510 } else
1511 switch_count = 0;
1512 }
1513 if (switch_count <= 1)
1514 cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
1515 else {
1516 switch_time_in_usec =
1517 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1518 ucode_switch_time =
1519 il_usecs_to_beacons(il, switch_time_in_usec,
1520 beacon_interval);
1521 cmd.switch_time =
1522 il_add_beacon_time(il, il->ucode_beacon_time,
1523 ucode_switch_time, beacon_interval);
1524 }
1525 D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
1526 ch_info = il_get_channel_info(il, il->band, ch);
1527 if (ch_info)
1528 cmd.expect_beacon = il_is_channel_radar(ch_info);
1529 else {
1530 IL_ERR("invalid channel switch from %u to %u\n",
1531 il->active.channel, ch);
1532 return -EFAULT;
1533 }
1534
1535 rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
1536 &cmd.tx_power);
1537 if (rc) {
1538 D_11H("error:%d fill txpower_tbl\n", rc);
1539 return rc;
1540 }
1541
1542 return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1543}
1544
1545/**
1546 * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1547 */
1548static void
1549il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
1550 u16 byte_cnt)
1551{
1552 struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
1553 int txq_id = txq->q.id;
1554 int write_ptr = txq->q.write_ptr;
1555 int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
1556 __le16 bc_ent;
1557
1558 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1559
1560 bc_ent = cpu_to_le16(len & 0xFFF);
1561 /* Set up byte count within first 256 entries */
1562 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1563
1564 /* If within first 64 entries, duplicate at end */
1565 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1566 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1567 bc_ent;
1568}
1569
1570/**
1571 * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1572 * @il: il_priv whose latest stats notification carries the temperature reading
1573 *
1574 * A return of <0 indicates bogus data in the stats
1575 */
1576static int
1577il4965_hw_get_temperature(struct il_priv *il)
1578{
1579 s32 temperature;
1580 s32 vt;
1581 s32 R1, R2, R3;
1582 u32 R4;
1583
1584 if (test_bit(S_TEMPERATURE, &il->status) &&
1585 (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
1586 D_TEMP("Running HT40 temperature calibration\n");
1587 R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
1588 R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
1589 R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
1590 R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
1591 } else {
1592 D_TEMP("Running temperature calibration\n");
1593 R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
1594 R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
1595 R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
1596 R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
1597 }
1598
1599 /*
1600 * Temperature is only 23 bits, so sign extend out to 32.
1601 *
1602 * NOTE If we haven't received a stats notification yet
1603 * with an updated temperature, use R4 provided to us in the
1604 * "initialize" ALIVE response.
1605 */
1606 if (!test_bit(S_TEMPERATURE, &il->status))
1607 vt = sign_extend32(R4, 23);
1608 else
1609 vt = sign_extend32(le32_to_cpu
1610 (il->_4965.stats.general.common.temperature),
1611 23);
1612
1613 D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1614
1615 if (R3 == R1) {
1616 IL_ERR("Calibration conflict R1 == R3\n");
1617 return -1;
1618 }
1619
1620 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1621 * Add offset to center the adjustment around 0 degrees Centigrade. */
1622 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1623 temperature /= (R3 - R1);
1624 temperature =
1625 (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
1626
1627 D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
1628 KELVIN_TO_CELSIUS(temperature));
1629
1630 return temperature;
1631}
1632
1633/* Adjust Txpower only if temperature variance is greater than threshold. */
1634#define IL_TEMPERATURE_THRESHOLD 3
1635
1636/**
1637 * il4965_is_temp_calib_needed - determines if new calibration is needed
1638 *
1639 * If the temperature has changed sufficiently, then a recalibration
1640 * is needed.
1641 *
1642 * Assumes caller will replace il->last_temperature once calibration
1643 * executed.
1644 */
1645static int
1646il4965_is_temp_calib_needed(struct il_priv *il)
1647{
1648 int temp_diff;
1649
1650 if (!test_bit(S_STATS, &il->status)) {
1651 D_TEMP("Temperature not updated -- no stats.\n");
1652 return 0;
1653 }
1654
1655 temp_diff = il->temperature - il->last_temperature;
1656
1657 /* get absolute value */
1658 if (temp_diff < 0) {
1659 D_POWER("Getting cooler, delta %d\n", temp_diff);
1660 temp_diff = -temp_diff;
1661 } else if (temp_diff == 0)
1662 D_POWER("Temperature unchanged\n");
1663 else
1664 D_POWER("Getting warmer, delta %d\n", temp_diff);
1665
1666 if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
1667 D_POWER(" => thermal txpower calib not needed\n");
1668 return 0;
1669 }
1670
1671 D_POWER(" => thermal txpower calib needed\n");
1672
1673 return 1;
1674}
1675
1676void
1677il4965_temperature_calib(struct il_priv *il)
1678{
1679 s32 temp;
1680
1681 temp = il4965_hw_get_temperature(il);
1682 if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
1683 return;
1684
1685 if (il->temperature != temp) {
1686 if (il->temperature)
1687 D_TEMP("Temperature changed " "from %dC to %dC\n",
1688 KELVIN_TO_CELSIUS(il->temperature),
1689 KELVIN_TO_CELSIUS(temp));
1690 else
1691 D_TEMP("Temperature " "initialized to %dC\n",
1692 KELVIN_TO_CELSIUS(temp));
1693 }
1694
1695 il->temperature = temp;
1696 set_bit(S_TEMPERATURE, &il->status);
1697
1698 if (!il->disable_tx_power_cal &&
1699 unlikely(!test_bit(S_SCANNING, &il->status)) &&
1700 il4965_is_temp_calib_needed(il))
1701 queue_work(il->workqueue, &il->txpower_work);
1702}
1703
1704static u16
1705il4965_get_hcmd_size(u8 cmd_id, u16 len)
1706{
1707 switch (cmd_id) {
1708 case C_RXON:
1709 return (u16) sizeof(struct il4965_rxon_cmd);
1710 default:
1711 return len;
1712 }
1713}
1714
1715static u16
1716il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
1717{
1718 struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
1719 addsta->mode = cmd->mode;
1720 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1721 memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
1722 addsta->station_flags = cmd->station_flags;
1723 addsta->station_flags_msk = cmd->station_flags_msk;
1724 addsta->tid_disable_tx = cmd->tid_disable_tx;
1725 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1726 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1727 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1728 addsta->sleep_tx_count = cmd->sleep_tx_count;
1729 addsta->reserved1 = cpu_to_le16(0);
1730 addsta->reserved2 = cpu_to_le16(0);
1731
1732 return (u16) sizeof(struct il4965_addsta_cmd);
1733}
1734
1735static void
1736il4965_post_scan(struct il_priv *il)
1737{
1738 /*
1739 * Since setting the RXON may have been deferred while
1740 * performing the scan, fire one off if needed
1741 */
1742 if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
1743 il_commit_rxon(il);
1744}
1745
1746static void
1747il4965_post_associate(struct il_priv *il)
1748{
1749 struct ieee80211_vif *vif = il->vif;
1750 int ret = 0;
1751
1752 if (!vif || !il->is_open)
1753 return;
1754
1755 if (test_bit(S_EXIT_PENDING, &il->status))
1756 return;
1757
1758 il_scan_cancel_timeout(il, 200);
1759
1760 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1761 il_commit_rxon(il);
1762
1763 ret = il_send_rxon_timing(il);
1764 if (ret)
1765 IL_WARN("RXON timing - " "Attempting to continue.\n");
1766
1767 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
1768
1769 il_set_rxon_ht(il, &il->current_ht_config);
1770
1771 if (il->ops->set_rxon_chain)
1772 il->ops->set_rxon_chain(il);
1773
1774 il->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
1775
1776 D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
1777 vif->bss_conf.beacon_int);
1778
1779 if (vif->bss_conf.use_short_preamble)
1780 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1781 else
1782 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1783
1784 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
1785 if (vif->bss_conf.use_short_slot)
1786 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
1787 else
1788 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1789 }
1790
1791 il_commit_rxon(il);
1792
1793 D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
1794 il->active.bssid_addr);
1795
1796 switch (vif->type) {
1797 case NL80211_IFTYPE_STATION:
1798 break;
1799 case NL80211_IFTYPE_ADHOC:
1800 il4965_send_beacon_cmd(il);
1801 break;
1802 default:
1803 IL_ERR("%s Should not be called in %d mode\n", __func__,
1804 vif->type);
1805 break;
1806 }
1807
1808	/* the chain noise calibration will enable PM upon completion
1809 * If chain noise has already been run, then we need to enable
1810 * power management here */
1811 if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
1812 il_power_update_mode(il, false);
1813
1814 /* Enable Rx differential gain and sensitivity calibrations */
1815 il4965_chain_noise_reset(il);
1816 il->start_calib = 1;
1817}
1818
1819static void
1820il4965_config_ap(struct il_priv *il)
1821{
1822 struct ieee80211_vif *vif = il->vif;
1823 int ret = 0;
1824
1825 lockdep_assert_held(&il->mutex);
1826
1827 if (test_bit(S_EXIT_PENDING, &il->status))
1828 return;
1829
1830 /* The following should be done only at AP bring up */
1831 if (!il_is_associated(il)) {
1832
1833 /* RXON - unassoc (to set timing command) */
1834 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1835 il_commit_rxon(il);
1836
1837 /* RXON Timing */
1838 ret = il_send_rxon_timing(il);
1839 if (ret)
1840 IL_WARN("RXON timing failed - "
1841 "Attempting to continue.\n");
1842
1843 /* AP has all antennas */
1844 il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
1845 il_set_rxon_ht(il, &il->current_ht_config);
1846 if (il->ops->set_rxon_chain)
1847 il->ops->set_rxon_chain(il);
1848
1849 il->staging.assoc_id = 0;
1850
1851 if (vif->bss_conf.use_short_preamble)
1852 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1853 else
1854 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1855
1856 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
1857 if (vif->bss_conf.use_short_slot)
1858 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
1859 else
1860 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1861 }
1862 /* need to send beacon cmd before committing assoc RXON! */
1863 il4965_send_beacon_cmd(il);
1864 /* restore RXON assoc */
1865 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
1866 il_commit_rxon(il);
1867 }
1868 il4965_send_beacon_cmd(il);
1869}
1870
1871const struct il_ops il4965_ops = {
1872 .txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
1873 .txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
1874 .txq_free_tfd = il4965_hw_txq_free_tfd,
1875 .txq_init = il4965_hw_tx_queue_init,
1876 .is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
1877 .init_alive_start = il4965_init_alive_start,
1878 .load_ucode = il4965_load_bsm,
1879 .dump_nic_error_log = il4965_dump_nic_error_log,
1880 .dump_fh = il4965_dump_fh,
1881 .set_channel_switch = il4965_hw_channel_switch,
1882 .apm_init = il_apm_init,
1883 .send_tx_power = il4965_send_tx_power,
1884 .update_chain_flags = il4965_update_chain_flags,
1885 .eeprom_acquire_semaphore = il4965_eeprom_acquire_semaphore,
1886 .eeprom_release_semaphore = il4965_eeprom_release_semaphore,
1887
1888 .rxon_assoc = il4965_send_rxon_assoc,
1889 .commit_rxon = il4965_commit_rxon,
1890 .set_rxon_chain = il4965_set_rxon_chain,
1891
1892 .get_hcmd_size = il4965_get_hcmd_size,
1893 .build_addsta_hcmd = il4965_build_addsta_hcmd,
1894 .request_scan = il4965_request_scan,
1895 .post_scan = il4965_post_scan,
1896
1897 .post_associate = il4965_post_associate,
1898 .config_ap = il4965_config_ap,
1899 .manage_ibss_station = il4965_manage_ibss_station,
1900 .update_bcast_stations = il4965_update_bcast_stations,
1901
1902 .send_led_cmd = il4965_send_led_cmd,
1903};
1904
1905struct il_cfg il4965_cfg = {
1906 .name = "Intel(R) Wireless WiFi Link 4965AGN",
1907 .fw_name_pre = IL4965_FW_PRE,
1908 .ucode_api_max = IL4965_UCODE_API_MAX,
1909 .ucode_api_min = IL4965_UCODE_API_MIN,
1910 .sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
1911 .valid_tx_ant = ANT_AB,
1912 .valid_rx_ant = ANT_ABC,
1913 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
1914 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
1915 .mod_params = &il4965_mod_params,
1916 .led_mode = IL_LED_BLINK,
1917 /*
1918 * Force use of chains B and C for scan RX on 5 GHz band
1919 * because the device has off-channel reception on chain A.
1920 */
1921 .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
1922
1923 .eeprom_size = IL4965_EEPROM_IMG_SIZE,
1924 .num_of_queues = IL49_NUM_QUEUES,
1925 .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
1926 .pll_cfg_val = 0,
1927 .set_l0s = true,
1928 .use_bsm = true,
1929 .led_compensation = 61,
1930 .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
1931 .wd_timeout = IL_DEF_WD_TIMEOUT,
1932 .temperature_kelvin = true,
1933 .ucode_tracing = true,
1934 .sensitivity_calib_by_driver = true,
1935 .chain_noise_calib_by_driver = true,
1936
1937 .regulatory_bands = {
1938 EEPROM_REGULATORY_BAND_1_CHANNELS,
1939 EEPROM_REGULATORY_BAND_2_CHANNELS,
1940 EEPROM_REGULATORY_BAND_3_CHANNELS,
1941 EEPROM_REGULATORY_BAND_4_CHANNELS,
1942 EEPROM_REGULATORY_BAND_5_CHANNELS,
1943 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
1944 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
1945 },
1946
1947};
1948
1949/* Module firmware */
1950MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
new file mode 100644
index 000000000000..8ab8706f9422
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -0,0 +1,1285 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __il_4965_h__
31#define __il_4965_h__
32
33struct il_rx_queue;
34struct il_rx_buf;
35struct il_rx_pkt;
36struct il_tx_queue;
37struct il_rxon_context;
38
39/* configuration for the _4965 devices */
40extern struct il_cfg il4965_cfg;
41extern const struct il_ops il4965_ops;
42
43extern struct il_mod_params il4965_mod_params;
44
45/* tx queue */
46void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
47 int freed);
48
49/* RXON */
50void il4965_set_rxon_chain(struct il_priv *il);
51
52/* uCode */
53int il4965_verify_ucode(struct il_priv *il);
54
55/* lib */
56void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
57
58void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
59int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
60int il4965_hw_nic_init(struct il_priv *il);
61int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
62
63void il4965_nic_config(struct il_priv *il);
64
65/* rx */
66void il4965_rx_queue_restock(struct il_priv *il);
67void il4965_rx_replenish(struct il_priv *il);
68void il4965_rx_replenish_now(struct il_priv *il);
69void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
70int il4965_rxq_stop(struct il_priv *il);
71int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
72void il4965_rx_handle(struct il_priv *il);
73
74/* tx */
75void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
76int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
77 dma_addr_t addr, u16 len, u8 reset, u8 pad);
78int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
79void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
80 struct ieee80211_tx_info *info);
81int il4965_tx_skb(struct il_priv *il,
82 struct ieee80211_sta *sta,
83 struct sk_buff *skb);
84int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
85 struct ieee80211_sta *sta, u16 tid, u16 * ssn);
86int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
87 struct ieee80211_sta *sta, u16 tid);
88int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
89int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
90void il4965_hw_txq_ctx_free(struct il_priv *il);
91int il4965_txq_ctx_alloc(struct il_priv *il);
92void il4965_txq_ctx_reset(struct il_priv *il);
93void il4965_txq_ctx_stop(struct il_priv *il);
94void il4965_txq_set_sched(struct il_priv *il, u32 mask);
95
96/*
97 * Acquire il->lock before calling this function !
98 */
99void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
100/**
101 * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
102 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
103 * @scd_retry: (1) Indicates queue will be used in aggregation mode
104 *
105 * NOTE: Acquire il->lock before calling this function !
106 */
107void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
108 int tx_fifo_id, int scd_retry);
109
110/* scan */
111int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
112
113/* station mgmt */
114int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
115 bool add);
116
117/* hcmd */
118int il4965_send_beacon_cmd(struct il_priv *il);
119
120#ifdef CONFIG_IWLEGACY_DEBUG
121const char *il4965_get_tx_fail_reason(u32 status);
122#else
123static inline const char *
124il4965_get_tx_fail_reason(u32 status)
125{
126 return "";
127}
128#endif
129
130/* station management */
131int il4965_alloc_bcast_station(struct il_priv *il);
132int il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r);
133int il4965_remove_default_wep_key(struct il_priv *il,
134 struct ieee80211_key_conf *key);
135int il4965_set_default_wep_key(struct il_priv *il,
136 struct ieee80211_key_conf *key);
137int il4965_restore_default_wep_keys(struct il_priv *il);
138int il4965_set_dynamic_key(struct il_priv *il,
139 struct ieee80211_key_conf *key, u8 sta_id);
140int il4965_remove_dynamic_key(struct il_priv *il,
141 struct ieee80211_key_conf *key, u8 sta_id);
142void il4965_update_tkip_key(struct il_priv *il,
143 struct ieee80211_key_conf *keyconf,
144 struct ieee80211_sta *sta, u32 iv32,
145 u16 *phase1key);
146int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
147int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
148 int tid, u16 ssn);
149int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
150 int tid);
151void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
152int il4965_update_bcast_stations(struct il_priv *il);
153
154/* rate */
155static inline u8
156il4965_hw_get_rate(__le32 rate_n_flags)
157{
158 return le32_to_cpu(rate_n_flags) & 0xFF;
159}
160
161/* eeprom */
162void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
163int il4965_eeprom_acquire_semaphore(struct il_priv *il);
164void il4965_eeprom_release_semaphore(struct il_priv *il);
165int il4965_eeprom_check_version(struct il_priv *il);
166
167/* mac80211 handlers (for 4965) */
168void il4965_mac_tx(struct ieee80211_hw *hw,
169 struct ieee80211_tx_control *control,
170 struct sk_buff *skb);
171int il4965_mac_start(struct ieee80211_hw *hw);
172void il4965_mac_stop(struct ieee80211_hw *hw);
173void il4965_configure_filter(struct ieee80211_hw *hw,
174 unsigned int changed_flags,
175 unsigned int *total_flags, u64 multicast);
176int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
177 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
178 struct ieee80211_key_conf *key);
179void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
180 struct ieee80211_vif *vif,
181 struct ieee80211_key_conf *keyconf,
182 struct ieee80211_sta *sta, u32 iv32,
183 u16 *phase1key);
184int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
185 enum ieee80211_ampdu_mlme_action action,
186 struct ieee80211_sta *sta, u16 tid, u16 * ssn,
187 u8 buf_size, bool amsdu);
188int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
189 struct ieee80211_sta *sta);
190void
191il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
192 struct ieee80211_channel_switch *ch_switch);
193
194void il4965_led_enable(struct il_priv *il);
195
196/* EEPROM */
197#define IL4965_EEPROM_IMG_SIZE 1024
198
199/*
200 * uCode queue management definitions ...
201 * The first queue used for block-ack aggregation is #7 (4965 only).
202 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
203 */
204#define IL49_FIRST_AMPDU_QUEUE 7
205
206/* Sizes and addresses for instruction and data memory (SRAM) in
207 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
208#define IL49_RTC_INST_LOWER_BOUND (0x000000)
209#define IL49_RTC_INST_UPPER_BOUND (0x018000)
210
211#define IL49_RTC_DATA_LOWER_BOUND (0x800000)
212#define IL49_RTC_DATA_UPPER_BOUND (0x80A000)
213
214#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \
215 IL49_RTC_INST_LOWER_BOUND)
216#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \
217 IL49_RTC_DATA_LOWER_BOUND)
218
219#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
220#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
221
222/* Size of uCode instruction memory in bootstrap state machine */
223#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
224
225static inline int
226il4965_hw_valid_rtc_data_addr(u32 addr)
227{
228 return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
229 addr < IL49_RTC_DATA_UPPER_BOUND);
230}
231
232/********************* START TEMPERATURE *************************************/
233
234/**
235 * 4965 temperature calculation.
236 *
237 * The driver must calculate the device temperature before calculating
238 * a txpower setting (amplifier gain is temperature dependent). The
239 * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
240 * values used for the life of the driver, and one of which (R4) is the
241 * real-time temperature indicator.
242 *
243 * uCode provides all 4 values to the driver via the "initialize alive"
244 * notification (see struct il4965_init_alive_resp). After the runtime uCode
245 * image loads, uCode updates the R4 value via stats notifications
246 * (see N_STATS), which occur after each received beacon
247 * when associated, or can be requested via C_STATS.
248 *
249 * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
250 * must sign-extend to 32 bits before applying formula below.
251 *
252 * Formula:
253 *
254 * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
255 *
256 * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
257 * an additional correction, which should be centered around 0 degrees
258 * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
259 * centering the 97/100 correction around 0 degrees K.
260 *
261 * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
262 * temperature with factory-measured temperatures when calculating txpower
263 * settings.
264 */
265#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
266#define TEMPERATURE_CALIB_A_VAL 259
267
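A quick sanity check of the formula above, using made-up calibration values (not taken from any real EEPROM): with R1 = 87, R2 = 130, R3 = 330 and a sign-extended R4 reading of 400,

	259 * (400 - 130) / (330 - 87) = 287	(integer division)
	287 * 97 / 100 + 8             = 286 K	(286 - 273 = 13 C)

which falls inside the IL_TX_POWER_TEMPERATURE_MIN/MAX range defined below.
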
268/* Limit range of calculated temperature to be between these Kelvin values */
269#define IL_TX_POWER_TEMPERATURE_MIN (263)
270#define IL_TX_POWER_TEMPERATURE_MAX (410)
271
272#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
273 ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
274 (t) > IL_TX_POWER_TEMPERATURE_MAX)
275
276void il4965_temperature_calib(struct il_priv *il);
277/********************* END TEMPERATURE ***************************************/
278
279/********************* START TXPOWER *****************************************/
280
281/**
282 * 4965 txpower calculations rely on information from three sources:
283 *
284 * 1) EEPROM
285 * 2) "initialize" alive notification
286 * 3) stats notifications
287 *
288 * EEPROM data consists of:
289 *
290 * 1) Regulatory information (max txpower and channel usage flags) is provided
291 * separately for each channel that can possibly be supported by 4965.
292 * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
293 * (legacy) channels.
294 *
295 * See struct il4965_eeprom_channel for format, and struct il4965_eeprom
296 * for locations in EEPROM.
297 *
298 * 2) Factory txpower calibration information is provided separately for
299 * sub-bands of contiguous channels. 2.4GHz has just one sub-band,
300 * but 5 GHz has several sub-bands.
301 *
302 * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
303 *
304 * See struct il4965_eeprom_calib_info (and the tree of structures
305 * contained within it) for format, and struct il4965_eeprom for
306 * locations in EEPROM.
307 *
308 * "Initialization alive" notification (see struct il4965_init_alive_resp)
309 * consists of:
310 *
311 * 1) Temperature calculation parameters.
312 *
313 * 2) Power supply voltage measurement.
314 *
315 * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
316 *
317 * Statistics notifications deliver:
318 *
319 * 1) Current values for temperature param R4.
320 */
321
322/**
323 * To calculate a txpower setting for a given desired target txpower, channel,
324 * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
325 * support MIMO and transmit diversity), driver must do the following:
326 *
327 * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
328 * Do not exceed regulatory limit; reduce target txpower if necessary.
329 *
330 * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
331 * 2 transmitters will be used simultaneously; driver must reduce the
332 * regulatory limit by 3 dB (half-power) for each transmitter, so the
333 * combined total output of the 2 transmitters is within regulatory limits.
334 *
335 *
336 * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
337 * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
338 * reduce target txpower if necessary.
339 *
340 * Backoff values below are in 1/2 dB units (equivalent to steps in
341 * txpower gain tables):
342 *
343 * OFDM 6 - 36 MBit: 10 steps (5 dB)
344 * OFDM 48 MBit: 15 steps (7.5 dB)
345 * OFDM 54 MBit: 17 steps (8.5 dB)
346 * OFDM 60 MBit: 20 steps (10 dB)
347 * CCK all rates: 10 steps (5 dB)
348 *
349 * Backoff values apply to saturation txpower on a per-transmitter basis;
350 * when using MIMO (2 transmitters), each transmitter uses the same
351 * saturation level provided in EEPROM, and the same backoff values;
352 * no reduction (such as with regulatory txpower limits) is required.
353 *
354 * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
355 * widths and 40 MHz (.11n HT40) channel widths; there is no separate
356 * factory measurement for HT40 channels.
357 *
358 * The result of this step is the final target txpower. The rest of
359 * the steps figure out the proper settings for the device to achieve
360 * that target txpower.
361 *
362 *
363 * 3) Determine (EEPROM) calibration sub band for the target channel, by
364 * comparing against first and last channels in each sub band
365 * (see struct il4965_eeprom_calib_subband_info).
366 *
367 *
368 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
369 * referencing the 2 factory-measured (sample) channels within the sub band.
370 *
371 * Interpolation is based on difference between target channel's frequency
372 * and the sample channels' frequencies. Since channel numbers are based
373 * on frequency (5 MHz between each channel number), this is equivalent
374 * to interpolating based on channel number differences.
375 *
376 * Note that the sample channels may or may not be the channels at the
377 * edges of the sub band. The target channel may be "outside" of the
378 * span of the sampled channels.
379 *
380 * Driver may choose the pair (for 2 Tx chains) of measurements (see
381 * struct il4965_eeprom_calib_ch_info) for which the actual measured
382 * txpower comes closest to the desired txpower. Usually, though,
383 * the middle set of measurements is closest to the regulatory limits,
384 * and is therefore a good choice for all txpower calculations (this
385 * assumes that high accuracy is needed for maximizing legal txpower,
386 * while lower txpower configurations do not need as much accuracy).
387 *
388 * Driver should interpolate both members of the chosen measurement pair,
389 * i.e. for both Tx chains (radio transmitters), unless the driver knows
390 * that only one of the chains will be used (e.g. only one tx antenna
391 * connected, but this should be unusual). The rate scaling algorithm
392 * switches antennas to find best performance, so both Tx chains will
393 * be used (although only one at a time) even for non-MIMO transmissions.
394 *
395 * Driver should interpolate factory values for temperature, gain table
396 * idx, and actual power. The power amplifier detector values are
397 * not used by the driver.
398 *
399 * Sanity check: If the target channel happens to be one of the sample
400 * channels, the results should agree with the sample channel's
401 * measurements!
402 *
403 *
404 * 5) Find difference between desired txpower and (interpolated)
405 * factory-measured txpower. Using (interpolated) factory gain table idx
406 * (shown elsewhere) as a starting point, adjust this idx lower to
407 * increase txpower, or higher to decrease txpower, until the target
408 * txpower is reached. Each step in the gain table is 1/2 dB.
409 *
410 * For example, if factory measured txpower is 16 dBm, and target txpower
411 * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
412 * by 3 dB.
413 *
414 *
415 * 6) Find difference between current device temperature and (interpolated)
416 * factory-measured temperature for sub-band. Factory values are in
417 * degrees Celsius. To calculate current temperature, see comments for
418 * "4965 temperature calculation".
419 *
420 * If current temperature is higher than factory temperature, driver must
421 * increase gain (lower gain table idx), and vice versa.
422 *
423 * Temperature affects gain differently for different channels:
424 *
425 * 2.4 GHz all channels: 3.5 degrees per half-dB step
426 * 5 GHz channels 34-43: 4.5 degrees per half-dB step
427 * 5 GHz channels >= 44: 4.0 degrees per half-dB step
428 *
429 * NOTE: Temperature can increase rapidly when transmitting, especially
430 * with heavy traffic at high txpowers. Driver should update
431 * temperature calculations often under these conditions to
432 * maintain strong txpower in the face of rising temperature.
433 *
434 *
435 * 7) Find difference between current power supply voltage indicator
436 * (from "initialize alive") and factory-measured power supply voltage
437 * indicator (EEPROM).
438 *
439 * If the current voltage is higher (indicator is lower) than factory
440 * voltage, gain should be reduced (gain table idx increased) by:
441 *
442 * (eeprom - current) / 7
443 *
444 * If the current voltage is lower (indicator is higher) than factory
445 * voltage, gain should be increased (gain table idx decreased) by:
446 *
447 * 2 * (current - eeprom) / 7
448 *
449 * If number of idx steps in either direction turns out to be > 2,
450 * something is wrong ... just use 0.
451 *
452 * NOTE: Voltage compensation is independent of band/channel.
453 *
454 * NOTE: "Initialize" uCode measures current voltage, which is assumed
455 * to be constant after this initial measurement. Voltage
456 * compensation for txpower (number of steps in gain table)
457 * may be calculated once and used until the next uCode bootload.
458 *
459 *
460 * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
461 * adjust txpower for each transmitter chain, so txpower is balanced
462 * between the two chains. There are 5 pairs of tx_atten[group][chain]
463 * values in "initialize alive", one pair for each of 5 channel ranges:
464 *
465 * Group 0: 5 GHz channel 34-43
466 * Group 1: 5 GHz channel 44-70
467 * Group 2: 5 GHz channel 71-124
468 * Group 3: 5 GHz channel 125-200
469 * Group 4: 2.4 GHz all channels
470 *
471 * Add the tx_atten[group][chain] value to the idx for the target chain.
472 * The values are signed, but are in pairs of 0 and a non-negative number,
473 * so as to reduce gain (if necessary) of the "hotter" channel. This
474 * avoids any need to double-check for regulatory compliance after
475 * this step.
476 *
477 *
478 * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
479 * value to the idx:
480 *
481 * Hardware rev B: 9 steps (4.5 dB)
482 * Hardware rev C: 5 steps (2.5 dB)
483 *
484 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
485 * bits [3:2], 1 = B, 2 = C.
486 *
487 * NOTE: This compensation is in addition to any saturation backoff that
488 * might have been applied in an earlier step.
489 *
490 *
491 * 10) Select the gain table, based on band (2.4 vs 5 GHz).
492 *
493 * Limit the adjusted idx to stay within the table!
494 *
495 *
496 * 11) Read gain table entries for DSP and radio gain, place into appropriate
497 * location(s) in command (struct il4965_txpowertable_cmd).
498 */
499
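The voltage compensation of step 7 is compact enough to show in full. The sketch below is an illustration only, not the driver's actual helper: the function name is hypothetical, the divisor 7 corresponds to TX_POWER_IL_VOLTAGE_CODES_PER_03V defined later in this file, and a positive return value means the gain-table idx is increased (txpower reduced).

/* Illustration only -- hypothetical helper, not part of the driver. */
static inline s32
example_voltage_comp_steps(s32 eeprom_ind, s32 current_ind)
{
	s32 steps;

	if (current_ind < eeprom_ind)
		/* actual supply voltage above factory value: reduce gain */
		steps = (eeprom_ind - current_ind) / 7;
	else
		/* actual supply voltage below factory value: increase gain */
		steps = -(2 * (current_ind - eeprom_ind) / 7);

	/* more than 2 steps in either direction means bogus data; use 0 */
	if (steps > 2 || steps < -2)
		steps = 0;

	return steps;
}
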
500/**
501 * When MIMO is used (2 transmitters operating simultaneously), driver should
502 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
503 * for the device. That is, use half power for each transmitter, so total
504 * txpower is within regulatory limits.
505 *
506 * The value "6" is the number of gain table steps needed to reduce power by 3 dB.
507 * Each step is 1/2 dB.
508 */
509#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
510
511/**
512 * CCK gain compensation.
513 *
514 * When calculating txpowers for CCK, after making sure that the target power
515 * is within regulatory and saturation limits, driver must additionally
516 * back off gain by adding these values to the gain table idx.
517 *
518 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
519 * bits [3:2], 1 = B, 2 = C.
520 */
521#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
522#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
523
524/*
525 * 4965 power supply voltage compensation for txpower
526 */
527#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7)
528
529/**
530 * Gain tables.
531 *
532 * The following tables contain pair of values for setting txpower, i.e.
533 * gain settings for the output of the device's digital signal processor (DSP),
534 * and for the analog gain structure of the transmitter.
535 *
536 * Each entry in the gain tables represents a step of 1/2 dB. Note that these
537 * are *relative* steps, not indications of absolute output power. Output
538 * power varies with temperature, voltage, and channel frequency, and also
539 * requires consideration of average power (to satisfy regulatory constraints),
540 * and peak power (to avoid distortion of the output signal).
541 *
542 * Each entry contains two values:
543 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
544 * linear value that multiplies the output of the digital signal processor,
545 * before being sent to the analog radio.
546 * 2) Radio gain. This sets the analog gain of the radio Tx path.
547 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
548 *
549 * EEPROM contains factory calibration data for txpower. This maps actual
550 * measured txpower levels to gain settings in the "well known" tables
551 * below ("well-known" means here that both factory calibration *and* the
552 * driver work with the same table).
553 *
554 * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
555 * has an extension (into negative idxes), in case the driver needs to
556 * boost power setting for high device temperatures (higher than would be
557 * present during factory calibration). A 5 GHz EEPROM idx of "40"
558 * corresponds to the 49th entry in the table used by the driver.
559 */
560#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */
561#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
562
563/**
564 * 2.4 GHz gain table
565 *
566 * Index Dsp gain Radio gain
567 * 0 110 0x3f (highest gain)
568 * 1 104 0x3f
569 * 2 98 0x3f
570 * 3 110 0x3e
571 * 4 104 0x3e
572 * 5 98 0x3e
573 * 6 110 0x3d
574 * 7 104 0x3d
575 * 8 98 0x3d
576 * 9 110 0x3c
577 * 10 104 0x3c
578 * 11 98 0x3c
579 * 12 110 0x3b
580 * 13 104 0x3b
581 * 14 98 0x3b
582 * 15 110 0x3a
583 * 16 104 0x3a
584 * 17 98 0x3a
585 * 18 110 0x39
586 * 19 104 0x39
587 * 20 98 0x39
588 * 21 110 0x38
589 * 22 104 0x38
590 * 23 98 0x38
591 * 24 110 0x37
592 * 25 104 0x37
593 * 26 98 0x37
594 * 27 110 0x36
595 * 28 104 0x36
596 * 29 98 0x36
597 * 30 110 0x35
598 * 31 104 0x35
599 * 32 98 0x35
600 * 33 110 0x34
601 * 34 104 0x34
602 * 35 98 0x34
603 * 36 110 0x33
604 * 37 104 0x33
605 * 38 98 0x33
606 * 39 110 0x32
607 * 40 104 0x32
608 * 41 98 0x32
609 * 42 110 0x31
610 * 43 104 0x31
611 * 44 98 0x31
612 * 45 110 0x30
613 * 46 104 0x30
614 * 47 98 0x30
615 * 48 110 0x6
616 * 49 104 0x6
617 * 50 98 0x6
618 * 51 110 0x5
619 * 52 104 0x5
620 * 53 98 0x5
621 * 54 110 0x4
622 * 55 104 0x4
623 * 56 98 0x4
624 * 57 110 0x3
625 * 58 104 0x3
626 * 59 98 0x3
627 * 60 110 0x2
628 * 61 104 0x2
629 * 62 98 0x2
630 * 63 110 0x1
631 * 64 104 0x1
632 * 65 98 0x1
633 * 66 110 0x0
634 * 67 104 0x0
635 * 68 98 0x0
636 * 69 97 0
637 * 70 96 0
638 * 71 95 0
639 * 72 94 0
640 * 73 93 0
641 * 74 92 0
642 * 75 91 0
643 * 76 90 0
644 * 77 89 0
645 * 78 88 0
646 * 79 87 0
647 * 80 86 0
648 * 81 85 0
649 * 82 84 0
650 * 83 83 0
651 * 84 82 0
652 * 85 81 0
653 * 86 80 0
654 * 87 79 0
655 * 88 78 0
656 * 89 77 0
657 * 90 76 0
658 * 91 75 0
659 * 92 74 0
660 * 93 73 0
661 * 94 72 0
662 * 95 71 0
663 * 96 70 0
664 * 97 69 0
665 * 98 68 0
666 */
667
668/**
669 * 5 GHz gain table
670 *
671 * Index Dsp gain Radio gain
672 * -9 123 0x3F (highest gain)
673 * -8 117 0x3F
674 * -7 110 0x3F
675 * -6 104 0x3F
676 * -5 98 0x3F
677 * -4 110 0x3E
678 * -3 104 0x3E
679 * -2 98 0x3E
680 * -1 110 0x3D
681 * 0 104 0x3D
682 * 1 98 0x3D
683 * 2 110 0x3C
684 * 3 104 0x3C
685 * 4 98 0x3C
686 * 5 110 0x3B
687 * 6 104 0x3B
688 * 7 98 0x3B
689 * 8 110 0x3A
690 * 9 104 0x3A
691 * 10 98 0x3A
692 * 11 110 0x39
693 * 12 104 0x39
694 * 13 98 0x39
695 * 14 110 0x38
696 * 15 104 0x38
697 * 16 98 0x38
698 * 17 110 0x37
699 * 18 104 0x37
700 * 19 98 0x37
701 * 20 110 0x36
702 * 21 104 0x36
703 * 22 98 0x36
704 * 23 110 0x35
705 * 24 104 0x35
706 * 25 98 0x35
707 * 26 110 0x34
708 * 27 104 0x34
709 * 28 98 0x34
710 * 29 110 0x33
711 * 30 104 0x33
712 * 31 98 0x33
713 * 32 110 0x32
714 * 33 104 0x32
715 * 34 98 0x32
716 * 35 110 0x31
717 * 36 104 0x31
718 * 37 98 0x31
719 * 38 110 0x30
720 * 39 104 0x30
721 * 40 98 0x30
722 * 41 110 0x25
723 * 42 104 0x25
724 * 43 98 0x25
725 * 44 110 0x24
726 * 45 104 0x24
727 * 46 98 0x24
728 * 47 110 0x23
729 * 48 104 0x23
730 * 49 98 0x23
731 * 50 110 0x22
732 * 51 104 0x18
733 * 52 98 0x18
734 * 53 110 0x17
735 * 54 104 0x17
736 * 55 98 0x17
737 * 56 110 0x16
738 * 57 104 0x16
739 * 58 98 0x16
740 * 59 110 0x15
741 * 60 104 0x15
742 * 61 98 0x15
743 * 62 110 0x14
744 * 63 104 0x14
745 * 64 98 0x14
746 * 65 110 0x13
747 * 66 104 0x13
748 * 67 98 0x13
749 * 68 110 0x12
750 * 69 104 0x08
751 * 70 98 0x08
752 * 71 110 0x07
753 * 72 104 0x07
754 * 73 98 0x07
755 * 74 110 0x06
756 * 75 104 0x06
757 * 76 98 0x06
758 * 77 110 0x05
759 * 78 104 0x05
760 * 79 98 0x05
761 * 80 110 0x04
762 * 81 104 0x04
763 * 82 98 0x04
764 * 83 110 0x03
765 * 84 104 0x03
766 * 85 98 0x03
767 * 86 110 0x02
768 * 87 104 0x02
769 * 88 98 0x02
770 * 89 110 0x01
771 * 90 104 0x01
772 * 91 98 0x01
773 * 92 110 0x00
774 * 93 104 0x00
775 * 94 98 0x00
776 * 95 93 0x00
777 * 96 88 0x00
778 * 97 83 0x00
779 * 98 78 0x00
780 */
781
782/**
783 * Sanity checks and default values for EEPROM regulatory levels.
784 * If EEPROM values fall outside MIN/MAX range, use default values.
785 *
786 * Regulatory limits refer to the maximum average txpower allowed by
787 * regulatory agencies in the geographies in which the device is meant
788 * to be operated. These limits are SKU-specific (i.e. geography-specific),
789 * and channel-specific; each channel has an individual regulatory limit
790 * listed in the EEPROM.
791 *
792 * Units are in half-dBm (i.e. "34" means 17 dBm).
793 */
794#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34)
795#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34)
796#define IL_TX_POWER_REGULATORY_MIN (0)
797#define IL_TX_POWER_REGULATORY_MAX (34)
798
799/**
800 * Sanity checks and default values for EEPROM saturation levels.
801 * If EEPROM values fall outside MIN/MAX range, use default values.
802 *
803 * Saturation is the highest level that the output power amplifier can produce
804 * without significant clipping distortion. This is a "peak" power level.
805 * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
806 * require differing amounts of backoff, relative to their average power output,
807 * in order to avoid clipping distortion.
808 *
809 * Driver must make sure that it is violating neither the saturation limit,
810 * nor the regulatory limit, when calculating Tx power settings for various
811 * rates.
812 *
813 * Units are in half-dBm (i.e. "38" means 19 dBm).
814 */
815#define IL_TX_POWER_DEFAULT_SATURATION_24 (38)
816#define IL_TX_POWER_DEFAULT_SATURATION_52 (38)
817#define IL_TX_POWER_SATURATION_MIN (20)
818#define IL_TX_POWER_SATURATION_MAX (50)
819
820/**
821 * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
822 * and thermal Txpower calibration.
823 *
824 * When calculating txpower, driver must compensate for current device
825 * temperature; higher temperature requires higher gain. Driver must calculate
826 * current temperature (see "4965 temperature calculation"), then compare vs.
827 * factory calibration temperature in EEPROM; if current temperature is higher
828 * than factory temperature, driver must *increase* gain by proportions shown
829 * in table below. If current temperature is lower than factory, driver must
830 * *decrease* gain.
831 *
832 * Different frequency ranges require different compensation, as shown below.
833 */
834/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
835#define CALIB_IL_TX_ATTEN_GR1_FCH 34
836#define CALIB_IL_TX_ATTEN_GR1_LCH 43
837
838/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
839#define CALIB_IL_TX_ATTEN_GR2_FCH 44
840#define CALIB_IL_TX_ATTEN_GR2_LCH 70
841
842/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
843#define CALIB_IL_TX_ATTEN_GR3_FCH 71
844#define CALIB_IL_TX_ATTEN_GR3_LCH 124
845
846/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
847#define CALIB_IL_TX_ATTEN_GR4_FCH 125
848#define CALIB_IL_TX_ATTEN_GR4_LCH 200
849
850/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
851#define CALIB_IL_TX_ATTEN_GR5_FCH 1
852#define CALIB_IL_TX_ATTEN_GR5_LCH 20
853
854enum {
855 CALIB_CH_GROUP_1 = 0,
856 CALIB_CH_GROUP_2 = 1,
857 CALIB_CH_GROUP_3 = 2,
858 CALIB_CH_GROUP_4 = 3,
859 CALIB_CH_GROUP_5 = 4,
860 CALIB_CH_GROUP_MAX
861};
862
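To make the grouping concrete, here is a minimal sketch of mapping a channel number to its calibration group using the bounds above. The function name is hypothetical and the driver's own lookup may be organized differently.

/* Illustration only -- hypothetical helper, not the driver's lookup. */
static inline int
example_tx_atten_group(u16 channel)
{
	if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;	/* 2.4 GHz, all channels */
	if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;	/* 5.2 GHz, ch 34-43 */
	if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;	/* 5.3 GHz, ch 44-70 */
	if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;	/* 5.5 GHz, ch 71-124 */
	if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;	/* 5.7 GHz, ch 125-200 */
	return -1;				/* not in any calibration group */
}
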
863/********************* END TXPOWER *****************************************/
864
865/**
866 * Tx/Rx Queues
867 *
868 * Most communication between driver and 4965 is via queues of data buffers.
869 * For example, all commands that the driver issues to device's embedded
870 * controller (uCode) are via the command queue (one of the Tx queues). All
871 * uCode command responses/replies/notifications, including Rx frames, are
872 * conveyed from uCode to driver via the Rx queue.
873 *
874 * Most support for these queues, including handshake support, resides in
875 * structures in host DRAM, shared between the driver and the device. When
876 * allocating this memory, the driver must make sure that data written by
877 * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
878 * cache memory), so DRAM and cache are consistent, and the device can
879 * immediately see changes made by the driver.
880 *
881 * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
882 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
883 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
884 */
885#define IL49_NUM_FIFOS 7
886#define IL49_CMD_FIFO_NUM 4
887#define IL49_NUM_QUEUES 16
888#define IL49_NUM_AMPDU_QUEUES 8
889
890/**
891 * struct il4965_schedq_bc_tbl
892 *
893 * Byte Count table
894 *
895 * Each Tx queue uses a byte-count table containing 320 entries:
896 * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
897 * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
898 * max Tx win is 64 TFDs).
899 *
900 * When driver sets up a new TFD, it must also enter the total byte count
901 * of the frame to be transmitted into the corresponding entry in the byte
902 * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver
903 * must duplicate the byte count entry in corresponding idx 256-319.
904 *
905 * padding puts each byte count table on a 1024-byte boundary;
906 * 4965 assumes tables are separated by 1024 bytes.
907 */
908struct il4965_scd_bc_tbl {
909 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
910 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
911} __packed;
912
913#define IL4965_RTC_INST_LOWER_BOUND (0x000000)
914
915/* RSSI to dBm */
916#define IL4965_RSSI_OFFSET 44
917
918/* PCI registers */
919#define PCI_CFG_RETRY_TIMEOUT 0x041
920
921#define IL4965_DEFAULT_TX_RETRY 15
922
923/* EEPROM */
924#define IL4965_FIRST_AMPDU_QUEUE 10
925
926/* Calibration */
927void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
928void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
929void il4965_init_sensitivity(struct il_priv *il);
930void il4965_reset_run_time_calib(struct il_priv *il);
931
932/* Debug */
933#ifdef CONFIG_IWLEGACY_DEBUGFS
934extern const struct il_debugfs_ops il4965_debugfs_ops;
935#endif
936
937/****************************/
938/* Flow Handler Definitions */
939/****************************/
940
941/**
942 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
943 * Addresses are offsets from device's PCI hardware base address.
944 */
945#define FH49_MEM_LOWER_BOUND (0x1000)
946#define FH49_MEM_UPPER_BOUND (0x2000)
947
948/**
949 * Keep-Warm (KW) buffer base address.
950 *
951 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
952 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
953 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
954 * from going into a power-savings mode that would cause higher DRAM latency,
955 * and possible data over/under-runs, before all Tx/Rx is complete.
956 *
957 * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
958 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
959 * automatically invokes keep-warm accesses when normal accesses might not
960 * be sufficient to maintain fast DRAM response.
961 *
962 * Bit fields:
963 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
964 */
965#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C)
966
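A minimal sketch of programming this register, assuming a DMA-coherent 4 KiB keep-warm buffer whose bus address is kw_dma and the driver's il_wr() register write helper; the wrapper name is hypothetical.

/* Illustration only: hand the keep-warm buffer base (bits 35:4) to the device. */
static inline void
example_set_kw_addr(struct il_priv *il, dma_addr_t kw_dma)
{
	il_wr(il, FH49_KW_MEM_ADDR_REG, (u32)(kw_dma >> 4));
}
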
967/**
968 * TFD Circular Buffers Base (CBBC) addresses
969 *
970 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
971 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
972 * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04
973 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
974 * aligned (address bits 0-7 must be 0).
975 *
976 * Bit fields in each pointer register:
977 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
978 */
979#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
980#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10)
981
982/* Find TFD CB base pointer for given queue (range 0-15). */
983#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
984
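For example, a driver could point queue txq_id at its 256-byte-aligned TFD circular buffer (bus address tfd_dma) roughly as follows; this is a hedged sketch with a hypothetical wrapper name, again assuming the il_wr() helper.

/* Illustration only: program the TFD CB base address (bits 35:8) for one queue. */
static inline void
example_set_tfd_cb_base(struct il_priv *il, int txq_id, dma_addr_t tfd_dma)
{
	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), (u32)(tfd_dma >> 8));
}
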
985/**
986 * Rx SRAM Control and Status Registers (RSCSR)
987 *
988 * These registers provide handshake between driver and 4965 for the Rx queue
989 * (this queue handles *all* command responses, notifications, Rx data, etc.
990 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
991 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
992 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
993 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
994 * mapping between RBDs and RBs.
995 *
996 * Driver must allocate host DRAM memory for the following, and set the
997 * physical address of each into 4965 registers:
998 *
999 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
1000 * entries (although any power of 2, up to 4096, is selectable by driver).
1001 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
1002 * (typically 4K, although 8K or 16K are also selectable by driver).
1003 * Driver sets up RB size and number of RBDs in the CB via Rx config
1004 * register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
1005 *
1006 * Bit fields within one RBD:
1007 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
1008 *
1009 * Driver sets physical address [35:8] of base of RBD circular buffer
1010 * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
1011 *
1012 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
1013 * (RBs) have been filled, via a "write pointer", actually the idx of
1014 * the RB's corresponding RBD within the circular buffer. Driver sets
1015 * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
1016 *
1017 * Bit fields in lower dword of Rx status buffer (upper dword not used
1018 * by driver; see struct il4965_shared, val0):
1019 * 31-12: Not used by driver
1020 * 11- 0: Index of last filled Rx buffer descriptor
1021 * (4965 writes, driver reads this value)
1022 *
1023 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
1024 * enter pointers to these RBs into contiguous RBD circular buffer entries,
1025 * and update the 4965's "write" idx register,
1026 * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
1027 *
1028 * This "write" idx corresponds to the *next* RBD that the driver will make
1029 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
1030 * the circular buffer. This value should initially be 0 (before preparing any
1031 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
1032 * wrap back to 0 at the end of the circular buffer (but don't wrap before
1033 * "read" idx has advanced past 1! See below).
1034 * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
1035 *
1036 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
1037 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
1038 * to tell the driver the idx of the latest filled RBD. The driver must
1039 * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
1040 *
1041 * The driver must also internally keep track of a third idx, which is the
1042 * next RBD to process. When receiving an Rx interrupt, driver should process
1043 * all filled but unprocessed RBs up to, but not including, the RB
1044 * corresponding to the "read" idx. For example, if "read" idx becomes "1",
1045 * driver may process the RB pointed to by RBD 0. Depending on volume of
1046 * traffic, there may be many RBs to process.
1047 *
1048 * If read idx == write idx, 4965 thinks there is no room to put new data.
1049 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
1050 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
1051 * and "read" idxes; that is, make sure that there are no more than 254
1052 * buffers waiting to be filled.
1053 */
1054#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0)
1055#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
1056#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND)
1057
1058/**
1059 * Physical base address of 8-byte Rx Status buffer.
1060 * Bit fields:
1061 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
1062 */
1063#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0)
1064
1065/**
1066 * Physical base address of Rx Buffer Descriptor Circular Buffer.
1067 * Bit fields:
1068 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
1069 */
1070#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004)
1071
1072/**
1073 * Rx write pointer (idx, really!).
1074 * Bit fields:
1075 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
1076 * NOTE: For 256-entry circular buffer, use only bits [7:0].
1077 */
1078#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008)
1079#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
1080
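Since the device expects the write idx in multiples of 8, a driver would round its software idx down before publishing it. A hedged sketch (hypothetical wrapper name, il_wr() helper assumed):

/* Illustration only: publish the RBD write idx, rounded down to a multiple of 8. */
static inline void
example_update_rx_wptr(struct il_priv *il, u32 sw_write_idx)
{
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, sw_write_idx & ~0x7);
}
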
1081/**
1082 * Rx Config/Status Registers (RCSR)
1083 * Rx Config Reg for channel 0 (only channel used)
1084 *
1085 * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
1086 * normal operation (see bit fields).
1087 *
1088 * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
1089 * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
1090 * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
1091 *
1092 * Bit fields:
1093 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1094 * '10' operate normally
1095 * 29-24: reserved
1096 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
1097 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
1098 * 19-18: reserved
1099 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
1100 * '10' 12K, '11' 16K.
1101 * 15-14: reserved
1102 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
1103 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
1104 * typical value 0x10 (about 1/2 msec)
1105 * 3- 0: reserved
1106 */
1107#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
1108#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0)
1109#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND)
1110
1111#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0)
1112
1113#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
 1114#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
1115#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
1116#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
1117#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
1118#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
1119
1120#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
1121#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
1122#define RX_RB_TIMEOUT (0x10)
1123
1124#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
1125#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
1126#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
1127
1128#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
1129#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
1130#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
1131#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
1132
1133#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
1134#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
1135#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
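
/*
 * Illustrative sketch, not driver code: composing a typical channel-0
 * Rx config value from the defines above -- DMA enabled, 256 RBDs,
 * 4K buffers, interrupts routed to the host, 0x10 timeout.  il_wr()
 * is an assumed register-write helper name.
 */
static void example_rx_config(struct il_priv *il)
{
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	      (8 << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS) | /* 2^8 = 256 RBDs */
	      (RX_RB_TIMEOUT << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));
}
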
1136
1137/**
1138 * Rx Shared Status Registers (RSSR)
1139 *
1140 * After stopping Rx DMA channel (writing 0 to
1141 * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
1142 * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
1143 *
1144 * Bit fields:
1145 * 24: 1 = Channel 0 is idle
1146 *
1147 * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
1148 * contain default values that should not be altered by the driver.
1149 */
1150#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40)
1151#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
1152
1153#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND)
1154#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004)
1155#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
1156 (FH49_MEM_RSSR_LOWER_BOUND + 0x008)
1157
1158#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
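
/*
 * Illustrative sketch, not driver code: the Rx stop sequence described
 * above -- clear the channel-0 config register, then wait for the idle
 * bit.  il_wr()/il_poll_bit() are assumed helper names and the timeout
 * value is arbitrary.
 */
static int example_rx_dma_stop(struct il_priv *il)
{
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	return il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
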
1159
1160#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
1161
1162/* TFDB Area - TFDs buffer table */
1163#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
1164#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900)
1165#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958)
1166#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
1167#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
1168
1169/**
1170 * Transmit DMA Channel Control/Status Registers (TCSR)
1171 *
1172 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
1173 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
1174 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
1175 *
1176 * To use a Tx DMA channel, driver must initialize its
1177 * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
1178 *
1179 * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
 1180 * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
1181 *
1182 * All other bits should be 0.
1183 *
1184 * Bit fields:
1185 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1186 * '10' operate normally
1187 * 29- 4: Reserved, set to "0"
1188 * 3: Enable internal DMA requests (1, normal operation), disable (0)
1189 * 2- 0: Reserved, set to "0"
1190 */
1191#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
1192#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60)
1193
1194/* Find Control/Status reg for given Tx DMA/FIFO channel */
1195#define FH49_TCSR_CHNL_NUM (7)
1196#define FH50_TCSR_CHNL_NUM (8)
1197
1198/* TCSR: tx_config register values */
1199#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
1200 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
1201#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
1202 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
1203#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
1204 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
1205
1206#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
1207#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
1208
1209#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
1210#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
1211
1212#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
1213#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
1214#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
1215
1216#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
1217#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
1218#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
1219
1220#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
1221#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
1222#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
1223
1224#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
1225#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
1226#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
1227
1228#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
1229#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
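
/*
 * Illustrative sketch, not driver code: enabling one Tx DMA/FIFO
 * channel exactly as the TCSR comment above prescribes.  il_wr() is
 * an assumed register-write helper name.
 */
static void example_tx_chnl_enable(struct il_priv *il, int chnl)
{
	il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl),
	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
}
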
1230
1231/**
1232 * Tx Shared Status Registers (TSSR)
1233 *
1234 * After stopping Tx DMA channel (writing 0 to
1235 * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
1236 * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
1237 * (channel's buffers empty | no pending requests).
1238 *
1239 * Bit fields:
1240 * 31-24: 1 = Channel buffers empty (channel 7:0)
1241 * 23-16: 1 = No pending requests (channel 7:0)
1242 */
1243#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0)
1244#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0)
1245
1246#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010)
1247
1248/**
1249 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
 1250 * 31: Indicates an address error when accessing internal memory
1251 * uCode/driver must write "1" in order to clear this flag
1252 * 30: Indicates that Host did not send the expected number of dwords to FH
1253 * uCode/driver must write "1" in order to clear this flag
 1254 * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
1255 * command was received from the scheduler while the TRB was already full
1256 * with previous command
1257 * uCode/driver must write "1" in order to clear this flag
1258 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
1259 * bit is set, it indicates that the FH has received a full indication
1260 * from the RTC TxFIFO and the current value of the TxCredit counter was
 1261 * not equal to zero. This means that the credit mechanism was not
1262 * synchronized to the TxFIFO status
1263 * uCode/driver must write "1" in order to clear this flag
1264 */
1265#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018)
1266
1267#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
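
/*
 * Illustrative sketch, not driver code: the Tx stop sequence described
 * above -- write 0 to the channel's config register, then poll the
 * shared status register with the per-channel idle mask.
 * il_wr()/il_poll_bit() are assumed helper names; timeout is arbitrary.
 */
static int example_tx_chnl_stop(struct il_priv *il, int chnl)
{
	il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);

	return il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
			   FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
			   FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
}
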
1268
1269/* Tx service channels */
1270#define FH49_SRVC_CHNL (9)
1271#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8)
1272#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
1273#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
1274 (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
1275
1276#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98)
1277/* Instruct FH to increment the retry count of a packet when
1278 * it is brought from the memory to TX-FIFO
1279 */
1280#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
1281
1282/* Keep Warm Size */
1283#define IL_KW_SIZE 0x1000 /* 4k */
1284
1285#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/intel/iwlegacy/Kconfig b/drivers/net/wireless/intel/iwlegacy/Kconfig
new file mode 100644
index 000000000000..fb919727b8bb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/Kconfig
@@ -0,0 +1,100 @@
1config IWLEGACY
2 tristate
3 select FW_LOADER
4 select NEW_LEDS
5 select LEDS_CLASS
6 select LEDS_TRIGGERS
7 select MAC80211_LEDS
8
9config IWL4965
10 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
11 depends on PCI && MAC80211
12 select IWLEGACY
13 ---help---
14	  This option enables support for the hardware listed below.
15
16 Select to build the driver supporting the:
17
18 Intel Wireless WiFi Link 4965AGN
19
20 This driver uses the kernel's mac80211 subsystem.
21
22 In order to use this driver, you will need a microcode (uCode)
23 image for it. You can obtain the microcode from:
24
25 <http://intellinuxwireless.org/>.
26
27 The microcode is typically installed in /lib/firmware. You can
28 look in the hotplug script /etc/hotplug/firmware.agent to
29 determine which directory FIRMWARE_DIR is set to when the script
30 runs.
31
32 If you want to compile the driver as a module ( = code which can be
33 inserted in and removed from the running kernel whenever you want),
34 say M here and read <file:Documentation/kbuild/modules.txt>. The
35 module will be called iwl4965.
36
37config IWL3945
38 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
39 depends on PCI && MAC80211
40 select IWLEGACY
41 ---help---
42 Select to build the driver supporting the:
43
44 Intel PRO/Wireless 3945ABG/BG Network Connection
45
46 This driver uses the kernel's mac80211 subsystem.
47
48 In order to use this driver, you will need a microcode (uCode)
49 image for it. You can obtain the microcode from:
50
51 <http://intellinuxwireless.org/>.
52
53 The microcode is typically installed in /lib/firmware. You can
54 look in the hotplug script /etc/hotplug/firmware.agent to
55 determine which directory FIRMWARE_DIR is set to when the script
56 runs.
57
58 If you want to compile the driver as a module ( = code which can be
59 inserted in and removed from the running kernel whenever you want),
60 say M here and read <file:Documentation/kbuild/modules.txt>. The
61 module will be called iwl3945.
62
63menu "iwl3945 / iwl4965 Debugging Options"
64 depends on IWLEGACY
65
66config IWLEGACY_DEBUG
67 bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
68 depends on IWLEGACY
69 ---help---
70 This option will enable debug tracing output for the iwlegacy
71 drivers.
72
73 This will result in the kernel module being ~100k larger. You can
74 control which debug output is sent to the kernel log by setting the
75 value in
76
77 /sys/class/net/wlan0/device/debug_level
78
79 This entry will only exist if this option is enabled.
80
81	  To set a value, simply echo a hex debug mask value to the same file:
82
83 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
84
85 You can find the list of debug mask values in:
86	  drivers/net/wireless/intel/iwlegacy/common.h
87
88 If this is your first time using this driver, you should say Y here
89 as the debug information can assist others in helping you resolve
90 any problems you may encounter.
91
92config IWLEGACY_DEBUGFS
93 bool "iwlegacy (iwl 3945/4965) debugfs support"
94 depends on IWLEGACY && MAC80211_DEBUGFS
95 ---help---
96 Enable creation of debugfs files for the iwlegacy drivers. This
97 is a low-impact option that allows getting insight into the
98 driver's state at runtime.
99
100endmenu
diff --git a/drivers/net/wireless/intel/iwlegacy/Makefile b/drivers/net/wireless/intel/iwlegacy/Makefile
new file mode 100644
index 000000000000..c985a01a0731
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/Makefile
@@ -0,0 +1,17 @@
1obj-$(CONFIG_IWLEGACY) += iwlegacy.o
2iwlegacy-objs := common.o
3iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
4
5iwlegacy-objs += $(iwlegacy-m)
6
7# 4965
8obj-$(CONFIG_IWL4965) += iwl4965.o
9iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
10iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
11
12# 3945
13obj-$(CONFIG_IWL3945) += iwl3945.o
14iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
15iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
16
17ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
new file mode 100644
index 000000000000..dd744135c956
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -0,0 +1,3370 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __il_commands_h__
65#define __il_commands_h__
66
67#include <linux/ieee80211.h>
68
69struct il_priv;
70
71/* uCode version contains 4 values: Major/Minor/API/Serial */
72#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
73#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
74#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
75#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
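
/*
 * Illustrative sketch, not driver code: decomposing a packed uCode
 * version word with the macros above.  The message format is only an
 * example.
 */
static void example_print_ucode_version(u32 ver)
{
	pr_info("uCode %u.%u API %u serial %u\n",
		IL_UCODE_MAJOR(ver), IL_UCODE_MINOR(ver),
		IL_UCODE_API(ver), IL_UCODE_SERIAL(ver));
}
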
76
77/* Tx rates */
78#define IL_CCK_RATES 4
79#define IL_OFDM_RATES 8
80#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES)
81
82enum {
83 N_ALIVE = 0x1,
84 N_ERROR = 0x2,
85
86 /* RXON and QOS commands */
87 C_RXON = 0x10,
88 C_RXON_ASSOC = 0x11,
89 C_QOS_PARAM = 0x13,
90 C_RXON_TIMING = 0x14,
91
92 /* Multi-Station support */
93 C_ADD_STA = 0x18,
94 C_REM_STA = 0x19,
95
96 /* Security */
97 C_WEPKEY = 0x20,
98
99 /* RX, TX, LEDs */
100 N_3945_RX = 0x1b, /* 3945 only */
101 C_TX = 0x1c,
102 C_RATE_SCALE = 0x47, /* 3945 only */
103 C_LEDS = 0x48,
104 C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */
105
106 /* 802.11h related */
107 C_CHANNEL_SWITCH = 0x72,
108 N_CHANNEL_SWITCH = 0x73,
109 C_SPECTRUM_MEASUREMENT = 0x74,
110 N_SPECTRUM_MEASUREMENT = 0x75,
111
112 /* Power Management */
113 C_POWER_TBL = 0x77,
114 N_PM_SLEEP = 0x7A,
115 N_PM_DEBUG_STATS = 0x7B,
116
117 /* Scan commands and notifications */
118 C_SCAN = 0x80,
119 C_SCAN_ABORT = 0x81,
120 N_SCAN_START = 0x82,
121 N_SCAN_RESULTS = 0x83,
122 N_SCAN_COMPLETE = 0x84,
123
124 /* IBSS/AP commands */
125 N_BEACON = 0x90,
126 C_TX_BEACON = 0x91,
127
128 /* Miscellaneous commands */
129 C_TX_PWR_TBL = 0x97,
130
131 /* Bluetooth device coexistence config command */
132 C_BT_CONFIG = 0x9b,
133
134 /* Statistics */
135 C_STATS = 0x9c,
136 N_STATS = 0x9d,
137
138 /* RF-KILL commands and notifications */
139 N_CARD_STATE = 0xa1,
140
141 /* Missed beacons notification */
142 N_MISSED_BEACONS = 0xa2,
143
144 C_CT_KILL_CONFIG = 0xa4,
145 C_SENSITIVITY = 0xa8,
146 C_PHY_CALIBRATION = 0xb0,
147 N_RX_PHY = 0xc0,
148 N_RX_MPDU = 0xc1,
149 N_RX = 0xc3,
150 N_COMPRESSED_BA = 0xc5,
151
152 IL_CN_MAX = 0xff
153};
154
155/******************************************************************************
156 * (0)
157 * Commonly used structures and definitions:
158 * Command header, rate_n_flags, txpower
159 *
160 *****************************************************************************/
161
162/* il_cmd_header flags value */
163#define IL_CMD_FAILED_MSK 0x40
164
165#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
166#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
167#define SEQ_TO_IDX(s) ((s) & 0xff)
168#define IDX_TO_SEQ(i) ((i) & 0xff)
169#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
170#define SEQ_RX_FRAME cpu_to_le16(0x8000)
171
172/**
173 * struct il_cmd_header
174 *
175 * This header format appears in the beginning of each command sent from the
176 * driver, and each response/notification received from uCode.
177 */
178struct il_cmd_header {
179 u8 cmd; /* Command ID: C_RXON, etc. */
180 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
181 /*
182 * The driver sets up the sequence number to values of its choosing.
183 * uCode does not use this value, but passes it back to the driver
184 * when sending the response to each driver-originated command, so
185 * the driver can match the response to the command. Since the values
186 * don't get used by uCode, the driver may set up an arbitrary format.
187 *
188 * There is one exception: uCode sets bit 15 when it originates
189 * the response/notification, i.e. when the response/notification
190 * is not a direct response to a command sent by the driver. For
191 * example, uCode issues N_3945_RX when it sends a received frame
192 * to the driver; it is not a direct response to any driver command.
193 *
194 * The Linux driver uses the following format:
195 *
196 * 0:7 tfd idx - position within TX queue
197 * 8:12 TX queue id
198 * 13 reserved
199 * 14 huge - driver sets this to indicate command is in the
200 * 'huge' storage at the end of the command buffers
201 * 15 unsolicited RX or uCode-originated notification
202 */
203 __le16 sequence;
204
205 /* command or response/notification data follows immediately */
206 u8 data[0];
207} __packed;
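
/*
 * Illustrative sketch, not driver code: filling a command header with
 * the sequence layout described above (tfd idx in bits 0:7, Tx queue
 * id in bits 8:12).  Queue and idx values are arbitrary examples.
 */
static void example_fill_cmd_header(struct il_cmd_header *hdr, u8 cmd,
				    int txq_id, int idx)
{
	hdr->cmd = cmd;			/* e.g. C_RXON */
	hdr->flags = 0;
	hdr->sequence = cpu_to_le16(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(idx));
}
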
208
209/**
210 * struct il3945_tx_power
211 *
212 * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
213 *
214 * Each entry contains two values:
215 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
216 * linear value that multiplies the output of the digital signal processor,
217 * before being sent to the analog radio.
218 * 2) Radio gain. This sets the analog gain of the radio Tx path.
219 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
220 *
221 * Driver obtains values from struct il3945_tx_power power_gain_table[][].
222 */
223struct il3945_tx_power {
224 u8 tx_gain; /* gain for analog radio */
225 u8 dsp_atten; /* gain for DSP */
226} __packed;
227
228/**
229 * struct il3945_power_per_rate
230 *
231 * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
232 */
233struct il3945_power_per_rate {
234 u8 rate; /* plcp */
235 struct il3945_tx_power tpc;
236 u8 reserved;
237} __packed;
238
239/**
240 * iwl4965 rate_n_flags bit fields
241 *
242 * rate_n_flags format is used in following iwl4965 commands:
243 * N_RX (response only)
244 * N_RX_MPDU (response only)
245 * C_TX (both command and response)
246 * C_TX_LINK_QUALITY_CMD
247 *
248 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
249 * 2-0: 0) 6 Mbps
250 * 1) 12 Mbps
251 * 2) 18 Mbps
252 * 3) 24 Mbps
253 * 4) 36 Mbps
254 * 5) 48 Mbps
255 * 6) 54 Mbps
256 * 7) 60 Mbps
257 *
258 * 4-3: 0) Single stream (SISO)
259 * 1) Dual stream (MIMO)
260 * 2) Triple stream (MIMO)
261 *
262 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
263 *
264 * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
265 * 3-0: 0xD) 6 Mbps
266 * 0xF) 9 Mbps
267 * 0x5) 12 Mbps
268 * 0x7) 18 Mbps
269 * 0x9) 24 Mbps
270 * 0xB) 36 Mbps
271 * 0x1) 48 Mbps
272 * 0x3) 54 Mbps
273 *
274 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
275 * 6-0: 10) 1 Mbps
276 * 20) 2 Mbps
277 * 55) 5.5 Mbps
278 * 110) 11 Mbps
279 */
280#define RATE_MCS_CODE_MSK 0x7
281#define RATE_MCS_SPATIAL_POS 3
282#define RATE_MCS_SPATIAL_MSK 0x18
283#define RATE_MCS_HT_DUP_POS 5
284#define RATE_MCS_HT_DUP_MSK 0x20
285
286/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
287#define RATE_MCS_FLAGS_POS 8
288#define RATE_MCS_HT_POS 8
289#define RATE_MCS_HT_MSK 0x100
290
291/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
292#define RATE_MCS_CCK_POS 9
293#define RATE_MCS_CCK_MSK 0x200
294
295/* Bit 10: (1) Use Green Field preamble */
296#define RATE_MCS_GF_POS 10
297#define RATE_MCS_GF_MSK 0x400
298
 299/* Bit 11: (1) Use 40 MHz HT40 chnl width, (0) use 20 MHz legacy chnl width */
300#define RATE_MCS_HT40_POS 11
301#define RATE_MCS_HT40_MSK 0x800
302
303/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
304#define RATE_MCS_DUP_POS 12
305#define RATE_MCS_DUP_MSK 0x1000
306
307/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
308#define RATE_MCS_SGI_POS 13
309#define RATE_MCS_SGI_MSK 0x2000
310
311/**
312 * rate_n_flags Tx antenna masks
313 * 4965 has 2 transmitters
 314 * bits 14:16
315 */
316#define RATE_MCS_ANT_POS 14
317#define RATE_MCS_ANT_A_MSK 0x04000
318#define RATE_MCS_ANT_B_MSK 0x08000
319#define RATE_MCS_ANT_C_MSK 0x10000
320#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
321#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
322#define RATE_ANT_NUM 3
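
/*
 * Illustrative sketch, not driver code: composing rate_n_flags values
 * per the bit-field description above -- an HT SISO 6 Mbps rate and a
 * legacy OFDM 6 Mbps rate (PLCP 0xD), both on antenna A.
 */
static __le32 example_ht_siso_6mbps_ant_a(void)
{
	/* bits 2-0 = 0 (6 Mbps), SISO, HT format (bit 8), antenna A */
	return cpu_to_le32(0 | RATE_MCS_HT_MSK | RATE_MCS_ANT_A_MSK);
}

static __le32 example_legacy_ofdm_6mbps_ant_a(void)
{
	/* legacy OFDM: bits 8 and 9 clear, PLCP value 0xD = 6 Mbps */
	return cpu_to_le32(0xD | RATE_MCS_ANT_A_MSK);
}
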
323
324#define POWER_TBL_NUM_ENTRIES 33
325#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32
326#define POWER_TBL_CCK_ENTRY 32
327
328#define IL_PWR_NUM_HT_OFDM_ENTRIES 24
329#define IL_PWR_CCK_ENTRIES 2
330
331/**
332 * union il4965_tx_power_dual_stream
333 *
334 * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
335 * Use __le32 version (struct tx_power_dual_stream) when building command.
336 *
337 * Driver provides radio gain and DSP attenuation settings to device in pairs,
338 * one value for each transmitter chain. The first value is for transmitter A,
339 * second for transmitter B.
340 *
341 * For SISO bit rates, both values in a pair should be identical.
342 * For MIMO rates, one value may be different from the other,
343 * in order to balance the Tx output between the two transmitters.
344 *
345 * See more details in doc for TXPOWER in 4965.h.
346 */
347union il4965_tx_power_dual_stream {
348 struct {
349 u8 radio_tx_gain[2];
350 u8 dsp_predis_atten[2];
351 } s;
352 u32 dw;
353};
354
355/**
356 * struct tx_power_dual_stream
357 *
358 * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
359 *
360 * Same format as il_tx_power_dual_stream, but __le32
361 */
362struct tx_power_dual_stream {
363 __le32 dw;
364} __packed;
365
366/**
367 * struct il4965_tx_power_db
368 *
369 * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
370 */
371struct il4965_tx_power_db {
372 struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
373} __packed;
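
/*
 * Illustrative sketch, not driver code: packing one dual-stream power
 * entry as described above -- fill the host-format union per chain,
 * then copy it into the little-endian on-the-wire struct.  The gain
 * and attenuation values are placeholders, not calibrated numbers.
 */
static void example_fill_power_entry(struct tx_power_dual_stream *entry,
				     u8 gain_a, u8 gain_b,
				     u8 atten_a, u8 atten_b)
{
	union il4965_tx_power_dual_stream tp;

	tp.s.radio_tx_gain[0] = gain_a;		/* transmitter A */
	tp.s.radio_tx_gain[1] = gain_b;		/* transmitter B */
	tp.s.dsp_predis_atten[0] = atten_a;
	tp.s.dsp_predis_atten[1] = atten_b;

	entry->dw = cpu_to_le32(tp.dw);
}
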
374
375/******************************************************************************
376 * (0a)
377 * Alive and Error Commands & Responses:
378 *
379 *****************************************************************************/
380
381#define UCODE_VALID_OK cpu_to_le32(0x1)
382#define INITIALIZE_SUBTYPE (9)
383
384/*
385 * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
386 *
387 * uCode issues this "initialize alive" notification once the initialization
388 * uCode image has completed its work, and is ready to load the runtime image.
389 * This is the *first* "alive" notification that the driver will receive after
390 * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
391 *
392 * See comments documenting "BSM" (bootstrap state machine).
393 *
394 * For 4965, this notification contains important calibration data for
395 * calculating txpower settings:
396 *
397 * 1) Power supply voltage indication. The voltage sensor outputs higher
 398 * values for lower voltage, and vice versa.
399 *
400 * 2) Temperature measurement parameters, for each of two channel widths
401 * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
402 * is done via one of the receiver chains, and channel width influences
403 * the results.
404 *
405 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
406 * for each of 5 frequency ranges.
407 */
408struct il_init_alive_resp {
409 u8 ucode_minor;
410 u8 ucode_major;
411 __le16 reserved1;
412 u8 sw_rev[8];
413 u8 ver_type;
414 u8 ver_subtype; /* "9" for initialize alive */
415 __le16 reserved2;
416 __le32 log_event_table_ptr;
417 __le32 error_event_table_ptr;
418 __le32 timestamp;
419 __le32 is_valid;
420
421 /* calibration values from "initialize" uCode */
422 __le32 voltage; /* signed, higher value is lower voltage */
423 __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */
424 __le32 therm_r2[2]; /* signed */
425 __le32 therm_r3[2]; /* signed */
426 __le32 therm_r4[2]; /* signed */
427 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
428 * 2 Tx chains */
429} __packed;
430
431/**
432 * N_ALIVE = 0x1 (response only, not a command)
433 *
434 * uCode issues this "alive" notification once the runtime image is ready
435 * to receive commands from the driver. This is the *second* "alive"
436 * notification that the driver will receive after rebooting uCode;
437 * this "alive" is indicated by subtype field != 9.
438 *
439 * See comments documenting "BSM" (bootstrap state machine).
440 *
441 * This response includes two pointers to structures within the device's
442 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
443 *
444 * 1) log_event_table_ptr indicates base of the event log. This traces
445 * a 256-entry history of uCode execution within a circular buffer.
446 * Its header format is:
447 *
448 * __le32 log_size; log capacity (in number of entries)
449 * __le32 type; (1) timestamp with each entry, (0) no timestamp
450 * __le32 wraps; # times uCode has wrapped to top of circular buffer
451 * __le32 write_idx; next circular buffer entry that uCode would fill
452 *
453 * The header is followed by the circular buffer of log entries. Entries
454 * with timestamps have the following format:
455 *
456 * __le32 event_id; range 0 - 1500
457 * __le32 timestamp; low 32 bits of TSF (of network, if associated)
458 * __le32 data; event_id-specific data value
459 *
460 * Entries without timestamps contain only event_id and data.
461 *
462 *
463 * 2) error_event_table_ptr indicates base of the error log. This contains
464 * information about any uCode error that occurs. For 4965, the format
465 * of the error log is:
466 *
467 * __le32 valid; (nonzero) valid, (0) log is empty
468 * __le32 error_id; type of error
469 * __le32 pc; program counter
470 * __le32 blink1; branch link
471 * __le32 blink2; branch link
472 * __le32 ilink1; interrupt link
473 * __le32 ilink2; interrupt link
474 * __le32 data1; error-specific data
475 * __le32 data2; error-specific data
476 * __le32 line; source code line of error
477 * __le32 bcon_time; beacon timer
478 * __le32 tsf_low; network timestamp function timer
479 * __le32 tsf_hi; network timestamp function timer
480 * __le32 gp1; GP1 timer register
481 * __le32 gp2; GP2 timer register
482 * __le32 gp3; GP3 timer register
483 * __le32 ucode_ver; uCode version
484 * __le32 hw_ver; HW Silicon version
485 * __le32 brd_ver; HW board version
486 * __le32 log_pc; log program counter
487 * __le32 frame_ptr; frame pointer
488 * __le32 stack_ptr; stack pointer
489 * __le32 hcmd; last host command
490 * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
491 * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
492 * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
493 * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
494 * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
495 * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
496 * __le32 wait_event; wait event() caller address
497 * __le32 l2p_control; L2pControlField
498 * __le32 l2p_duration; L2pDurationField
499 * __le32 l2p_mhvalid; L2pMhValidBits
500 * __le32 l2p_addr_match; L2pAddrMatchStat
 501 * __le32 lmpm_pmg_sel; indicates which clocks are turned on (LMPM_PMG_SEL)
 502 * __le32 u_timestamp; indicates the date and time of the compilation
503 * __le32 reserved;
504 *
505 * The Linux driver can print both logs to the system log when a uCode error
506 * occurs.
507 */
508struct il_alive_resp {
509 u8 ucode_minor;
510 u8 ucode_major;
511 __le16 reserved1;
512 u8 sw_rev[8];
513 u8 ver_type;
514 u8 ver_subtype; /* not "9" for runtime alive */
515 __le16 reserved2;
516 __le32 log_event_table_ptr; /* SRAM address for event log */
517 __le32 error_event_table_ptr; /* SRAM address for error log */
518 __le32 timestamp;
519 __le32 is_valid;
520} __packed;
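
/*
 * Illustrative sketch, not a driver type: a host-side mirror of the
 * event-log header layout documented above, as one might declare it
 * when dumping SRAM for debugging.
 */
struct example_event_log_hdr {
	__le32 log_size;	/* capacity, in entries */
	__le32 type;		/* 1 = entries carry timestamps */
	__le32 wraps;		/* times uCode wrapped the circular buffer */
	__le32 write_idx;	/* next entry uCode would fill */
} __packed;
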
521
522/*
523 * N_ERROR = 0x2 (response only, not a command)
524 */
525struct il_error_resp {
526 __le32 error_type;
527 u8 cmd_id;
528 u8 reserved1;
529 __le16 bad_cmd_seq_num;
530 __le32 error_info;
531 __le64 timestamp;
532} __packed;
533
534/******************************************************************************
535 * (1)
536 * RXON Commands & Responses:
537 *
538 *****************************************************************************/
539
540/*
541 * Rx config defines & structure
542 */
543/* rx_config device types */
544enum {
545 RXON_DEV_TYPE_AP = 1,
546 RXON_DEV_TYPE_ESS = 3,
547 RXON_DEV_TYPE_IBSS = 4,
548 RXON_DEV_TYPE_SNIFFER = 6,
549};
550
551#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
552#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
553#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
554#define RXON_RX_CHAIN_VALID_POS (1)
555#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
556#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
557#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
558#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
559#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
560#define RXON_RX_CHAIN_CNT_POS (10)
561#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
562#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
563#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
564#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
565
566/* rx_config flags */
567/* band & modulation selection */
568#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
569#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
570/* auto detection enable */
571#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
572/* TGg protection when tx */
573#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
574/* cck short slot & preamble */
575#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
576#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
577/* antenna selection */
578#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
579#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
580#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
581#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
582/* radar detection enable */
583#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
584#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
585/* rx response to host with 8-byte TSF
586* (according to ON_AIR deassertion) */
587#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
588
589/* HT flags */
590#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
591#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
592
593#define RXON_FLG_HT_OPERATING_MODE_POS (23)
594
595#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23)
596#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23)
597
598#define RXON_FLG_CHANNEL_MODE_POS (25)
599#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
600
601/* channel mode */
602enum {
603 CHANNEL_MODE_LEGACY = 0,
604 CHANNEL_MODE_PURE_40 = 1,
605 CHANNEL_MODE_MIXED = 2,
606 CHANNEL_MODE_RESERVED = 3,
607};
608#define RXON_FLG_CHANNEL_MODE_LEGACY \
609 cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
610#define RXON_FLG_CHANNEL_MODE_PURE_40 \
611 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
612#define RXON_FLG_CHANNEL_MODE_MIXED \
613 cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
614
615/* CTS to self (if spec allows) flag */
616#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
617
618/* rx_config filter flags */
619/* accept all data frames */
620#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
621/* pass control & management to host */
622#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
623/* accept multi-cast */
624#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
625/* don't decrypt uni-cast frames */
626#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
627/* don't decrypt multi-cast frames */
628#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
629/* STA is associated */
630#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
631/* transfer to host non bssid beacons in associated state */
632#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
633
634/**
635 * C_RXON = 0x10 (command, has simple generic response)
636 *
637 * RXON tunes the radio tuner to a service channel, and sets up a number
638 * of parameters that are used primarily for Rx, but also for Tx operations.
639 *
640 * NOTE: When tuning to a new channel, driver must set the
641 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
642 * info within the device, including the station tables, tx retry
643 * rate tables, and txpower tables. Driver must build a new station
644 * table and txpower table before transmitting anything on the RXON
645 * channel.
646 *
647 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
648 * issue a new C_TX_PWR_TBL after each C_RXON (0x10),
649 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
650 */
651
652struct il3945_rxon_cmd {
653 u8 node_addr[6];
654 __le16 reserved1;
655 u8 bssid_addr[6];
656 __le16 reserved2;
657 u8 wlap_bssid_addr[6];
658 __le16 reserved3;
659 u8 dev_type;
660 u8 air_propagation;
661 __le16 reserved4;
662 u8 ofdm_basic_rates;
663 u8 cck_basic_rates;
664 __le16 assoc_id;
665 __le32 flags;
666 __le32 filter_flags;
667 __le16 channel;
668 __le16 reserved5;
669} __packed;
670
671struct il4965_rxon_cmd {
672 u8 node_addr[6];
673 __le16 reserved1;
674 u8 bssid_addr[6];
675 __le16 reserved2;
676 u8 wlap_bssid_addr[6];
677 __le16 reserved3;
678 u8 dev_type;
679 u8 air_propagation;
680 __le16 rx_chain;
681 u8 ofdm_basic_rates;
682 u8 cck_basic_rates;
683 __le16 assoc_id;
684 __le32 flags;
685 __le32 filter_flags;
686 __le16 channel;
687 u8 ofdm_ht_single_stream_basic_rates;
688 u8 ofdm_ht_dual_stream_basic_rates;
689} __packed;
690
691/* Create a common rxon cmd which will be typecast into the 3945 or 4965
692 * specific rxon cmd, depending on where it is called from.
693 */
694struct il_rxon_cmd {
695 u8 node_addr[6];
696 __le16 reserved1;
697 u8 bssid_addr[6];
698 __le16 reserved2;
699 u8 wlap_bssid_addr[6];
700 __le16 reserved3;
701 u8 dev_type;
702 u8 air_propagation;
703 __le16 rx_chain;
704 u8 ofdm_basic_rates;
705 u8 cck_basic_rates;
706 __le16 assoc_id;
707 __le32 flags;
708 __le32 filter_flags;
709 __le16 channel;
710 u8 ofdm_ht_single_stream_basic_rates;
711 u8 ofdm_ht_dual_stream_basic_rates;
712 u8 reserved4;
713 u8 reserved5;
714} __packed;
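
/*
 * Illustrative sketch, not driver code: the ordering implied by the
 * RXON notes above -- tune with the "associated" filter bit cleared,
 * then rebuild the station table and txpower table before any Tx.
 * il_send_cmd_pdu() is an assumed helper name; the station/txpower
 * steps are placeholders.
 */
static int example_tune_to_channel(struct il_priv *il,
				   struct il_rxon_cmd *rxon, u16 channel)
{
	int ret;

	rxon->channel = cpu_to_le16(channel);
	rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

	ret = il_send_cmd_pdu(il, C_RXON, sizeof(*rxon), rxon);
	if (ret)
		return ret;

	/* RXON wiped the station and txpower tables; rebuild them
	 * (placeholder steps) before transmitting on the new channel:
	 *   - add the broadcast station via C_ADD_STA
	 *   - send C_TX_PWR_TBL for the new channel
	 */
	return 0;
}
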
715
716/*
717 * C_RXON_ASSOC = 0x11 (command, has simple generic response)
718 */
719struct il3945_rxon_assoc_cmd {
720 __le32 flags;
721 __le32 filter_flags;
722 u8 ofdm_basic_rates;
723 u8 cck_basic_rates;
724 __le16 reserved;
725} __packed;
726
727struct il4965_rxon_assoc_cmd {
728 __le32 flags;
729 __le32 filter_flags;
730 u8 ofdm_basic_rates;
731 u8 cck_basic_rates;
732 u8 ofdm_ht_single_stream_basic_rates;
733 u8 ofdm_ht_dual_stream_basic_rates;
734 __le16 rx_chain_select_flags;
735 __le16 reserved;
736} __packed;
737
738#define IL_CONN_MAX_LISTEN_INTERVAL 10
739#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
740#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
741
742/*
743 * C_RXON_TIMING = 0x14 (command, has simple generic response)
744 */
745struct il_rxon_time_cmd {
746 __le64 timestamp;
747 __le16 beacon_interval;
748 __le16 atim_win;
749 __le32 beacon_init_val;
750 __le16 listen_interval;
751 u8 dtim_period;
752 u8 delta_cp_bss_tbtts;
753} __packed;
754
755/*
756 * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
757 */
758struct il3945_channel_switch_cmd {
759 u8 band;
760 u8 expect_beacon;
761 __le16 channel;
762 __le32 rxon_flags;
763 __le32 rxon_filter_flags;
764 __le32 switch_time;
765 struct il3945_power_per_rate power[IL_MAX_RATES];
766} __packed;
767
768struct il4965_channel_switch_cmd {
769 u8 band;
770 u8 expect_beacon;
771 __le16 channel;
772 __le32 rxon_flags;
773 __le32 rxon_filter_flags;
774 __le32 switch_time;
775 struct il4965_tx_power_db tx_power;
776} __packed;
777
778/*
779 * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
780 */
781struct il_csa_notification {
782 __le16 band;
783 __le16 channel;
784 __le32 status; /* 0 - OK, 1 - fail */
785} __packed;
786
787/******************************************************************************
788 * (2)
789 * Quality-of-Service (QOS) Commands & Responses:
790 *
791 *****************************************************************************/
792
793/**
794 * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
795 * One for each of 4 EDCA access categories in struct il_qosparam_cmd
796 *
797 * @cw_min: Contention win, start value in numbers of slots.
798 * Should be a power-of-2, minus 1. Device's default is 0x0f.
799 * @cw_max: Contention win, max value in numbers of slots.
800 * Should be a power-of-2, minus 1. Device's default is 0x3f.
801 * @aifsn: Number of slots in Arbitration Interframe Space (before
802 * performing random backoff timing prior to Tx). Device default 1.
803 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
804 *
 805 * Device will automatically increase contention win to (2*CW) + 1 for each
806 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
807 * value, to cap the CW value.
808 */
809struct il_ac_qos {
810 __le16 cw_min;
811 __le16 cw_max;
812 u8 aifsn;
813 u8 reserved1;
814 __le16 edca_txop;
815} __packed;
816
817/* QoS flags defines */
818#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
819#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
820#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
821
822/* Number of Access Categories (AC) (EDCA), queues 0..3 */
823#define AC_NUM 4
824
825/*
826 * C_QOS_PARAM = 0x13 (command, has simple generic response)
827 *
828 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
829 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
830 */
831struct il_qosparam_cmd {
832 __le32 qos_flags;
833 struct il_ac_qos ac[AC_NUM];
834} __packed;
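
/*
 * Illustrative sketch, not driver code: filling C_QOS_PARAM with the
 * device defaults quoted in the il_ac_qos description above (cw_min
 * 0x0f, cw_max 0x3f, aifsn 1, txop 0).  A real driver derives these
 * from mac80211's per-AC parameters instead.
 */
static void example_fill_qos_defaults(struct il_qosparam_cmd *cmd)
{
	int ac;

	cmd->qos_flags = QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	for (ac = 0; ac < AC_NUM; ac++) {
		cmd->ac[ac].cw_min = cpu_to_le16(0x0f);	/* 2^4 - 1 */
		cmd->ac[ac].cw_max = cpu_to_le16(0x3f);	/* 2^6 - 1 */
		cmd->ac[ac].aifsn = 1;
		cmd->ac[ac].reserved1 = 0;
		cmd->ac[ac].edca_txop = 0;
	}
}
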
835
836/******************************************************************************
837 * (3)
838 * Add/Modify Stations Commands & Responses:
839 *
840 *****************************************************************************/
841/*
842 * Multi station support
843 */
844
845/* Special, dedicated locations within device's station table */
846#define IL_AP_ID 0
847#define IL_STA_ID 2
848#define IL3945_BROADCAST_ID 24
849#define IL3945_STATION_COUNT 25
850#define IL4965_BROADCAST_ID 31
851#define IL4965_STATION_COUNT 32
852
853#define IL_STATION_COUNT 32 /* MAX(3945,4965) */
854#define IL_INVALID_STATION 255
855
856#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
857#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
858#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
859#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
860#define STA_FLG_MAX_AGG_SIZE_POS (19)
861#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19)
862#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21)
863#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22)
864#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
865#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23)
866
867/* Use in mode field. 1: modify existing entry, 0: add new station entry */
868#define STA_CONTROL_MODIFY_MSK 0x01
869
870/* key flags __le16*/
871#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
872#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
873#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
874#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
875#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
876
877#define STA_KEY_FLG_KEYID_POS 8
878#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
879/* wep key is either from global key (0) or from station info array (1) */
880#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008)
881
882/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
883#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
884#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
885#define STA_KEY_MAX_NUM 8
886
887/* Flags indicate whether to modify vs. don't change various station params */
888#define STA_MODIFY_KEY_MASK 0x01
889#define STA_MODIFY_TID_DISABLE_TX 0x02
890#define STA_MODIFY_TX_RATE_MSK 0x04
891#define STA_MODIFY_ADDBA_TID_MSK 0x08
892#define STA_MODIFY_DELBA_TID_MSK 0x10
893#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
894
895/* Receiver address (actually, Rx station's idx into station table),
896 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
897#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
898
899struct il4965_keyinfo {
900 __le16 key_flags;
901 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
902 u8 reserved1;
903 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
904 u8 key_offset;
905 u8 reserved2;
906 u8 key[16]; /* 16-byte unicast decryption key */
907} __packed;
908
909/**
910 * struct sta_id_modify
911 * @addr[ETH_ALEN]: station's MAC address
912 * @sta_id: idx of station in uCode's station table
913 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
914 *
915 * Driver selects unused table idx when adding new station,
916 * or the idx to a pre-existing station entry when modifying that station.
917 * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
918 *
919 * modify_mask flags select which parameters to modify vs. leave alone.
920 */
921struct sta_id_modify {
922 u8 addr[ETH_ALEN];
923 __le16 reserved1;
924 u8 sta_id;
925 u8 modify_mask;
926 __le16 reserved2;
927} __packed;
928
929/*
930 * C_ADD_STA = 0x18 (command)
931 *
932 * The device contains an internal table of per-station information,
933 * with info on security keys, aggregation parameters, and Tx rates for
934 * initial Tx attempt and any retries (4965 devices uses
935 * C_TX_LINK_QUALITY_CMD,
936 * 3945 uses C_RATE_SCALE to set up rate tables).
937 *
938 * C_ADD_STA sets up the table entry for one station, either creating
939 * a new entry, or modifying a pre-existing one.
940 *
941 * NOTE: RXON command (without "associated" bit set) wipes the station table
942 * clean. Moving into RF_KILL state does this also. Driver must set up
943 * new station table before transmitting anything on the RXON channel
944 * (except active scans or active measurements; those commands carry
945 * their own txpower/rate setup data).
946 *
947 * When getting started on a new channel, driver must set up the
948 * IL_BROADCAST_ID entry (last entry in the table). For a client
949 * station in a BSS, once an AP is selected, driver sets up the AP STA
950 * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP
951 * are all that are needed for a BSS client station. If the device is
952 * used as AP, or in an IBSS network, driver must set up station table
953 * entries for all STAs in network, starting with idx IL_STA_ID.
954 */
955
956struct il3945_addsta_cmd {
957 u8 mode; /* 1: modify existing, 0: add new station */
958 u8 reserved[3];
959 struct sta_id_modify sta;
960 struct il4965_keyinfo key;
961 __le32 station_flags; /* STA_FLG_* */
962 __le32 station_flags_msk; /* STA_FLG_* */
963
964 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
965 * corresponding to bit (e.g. bit 5 controls TID 5).
966 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
967 __le16 tid_disable_tx;
968
969 __le16 rate_n_flags;
970
971 /* TID for which to add block-ack support.
972 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
973 u8 add_immediate_ba_tid;
974
975 /* TID for which to remove block-ack support.
976 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
977 u8 remove_immediate_ba_tid;
978
979 /* Starting Sequence Number for added block-ack support.
980 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
981 __le16 add_immediate_ba_ssn;
982} __packed;
983
984struct il4965_addsta_cmd {
985 u8 mode; /* 1: modify existing, 0: add new station */
986 u8 reserved[3];
987 struct sta_id_modify sta;
988 struct il4965_keyinfo key;
989 __le32 station_flags; /* STA_FLG_* */
990 __le32 station_flags_msk; /* STA_FLG_* */
991
992 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
993 * corresponding to bit (e.g. bit 5 controls TID 5).
994 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
995 __le16 tid_disable_tx;
996
997 __le16 reserved1;
998
999 /* TID for which to add block-ack support.
1000 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1001 u8 add_immediate_ba_tid;
1002
1003 /* TID for which to remove block-ack support.
1004 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1005 u8 remove_immediate_ba_tid;
1006
1007 /* Starting Sequence Number for added block-ack support.
1008 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1009 __le16 add_immediate_ba_ssn;
1010
1011 /*
1012 * Number of packets OK to transmit to station even though
1013 * it is asleep -- used to synchronise PS-poll and u-APSD
1014 * responses while ucode keeps track of STA sleep state.
1015 */
1016 __le16 sleep_tx_count;
1017
1018 __le16 reserved2;
1019} __packed;
1020
1021/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
1022struct il_addsta_cmd {
1023 u8 mode; /* 1: modify existing, 0: add new station */
1024 u8 reserved[3];
1025 struct sta_id_modify sta;
1026 struct il4965_keyinfo key;
1027 __le32 station_flags; /* STA_FLG_* */
1028 __le32 station_flags_msk; /* STA_FLG_* */
1029
1030 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
1031 * corresponding to bit (e.g. bit 5 controls TID 5).
1032 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1033 __le16 tid_disable_tx;
1034
1035 __le16 rate_n_flags; /* 3945 only */
1036
1037 /* TID for which to add block-ack support.
1038 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1039 u8 add_immediate_ba_tid;
1040
1041 /* TID for which to remove block-ack support.
1042 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1043 u8 remove_immediate_ba_tid;
1044
1045 /* Starting Sequence Number for added block-ack support.
1046 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1047 __le16 add_immediate_ba_ssn;
1048
1049 /*
1050 * Number of packets OK to transmit to station even though
1051 * it is asleep -- used to synchronise PS-poll and u-APSD
1052 * responses while ucode keeps track of STA sleep state.
1053 */
1054 __le16 sleep_tx_count;
1055
1056 __le16 reserved2;
1057} __packed;
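
/*
 * Illustrative sketch, not driver code: preparing an "add new station"
 * command for the 4965 broadcast entry, per the table-setup rules
 * described above.  Sending the command and the driver's own station
 * bookkeeping are left out.
 */
static void example_prep_bcast_station(struct il_addsta_cmd *cmd)
{
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	memset(cmd, 0, sizeof(*cmd));
	cmd->mode = 0;				/* 0 = add new station */
	memcpy(cmd->sta.addr, bcast_addr, ETH_ALEN);
	cmd->sta.sta_id = IL4965_BROADCAST_ID;	/* last entry in the table */
	cmd->station_flags = 0;
	cmd->station_flags_msk = 0;
}
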
1058
1059#define ADD_STA_SUCCESS_MSK 0x1
1060#define ADD_STA_NO_ROOM_IN_TBL 0x2
1061#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
1062#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
1063/*
1064 * C_ADD_STA = 0x18 (response)
1065 */
1066struct il_add_sta_resp {
1067 u8 status; /* ADD_STA_* */
1068} __packed;
1069
1070#define REM_STA_SUCCESS_MSK 0x1
1071/*
1072 * C_REM_STA = 0x19 (response)
1073 */
1074struct il_rem_sta_resp {
1075 u8 status;
1076} __packed;
1077
1078/*
1079 * C_REM_STA = 0x19 (command)
1080 */
1081struct il_rem_sta_cmd {
1082 u8 num_sta; /* number of removed stations */
1083 u8 reserved[3];
1084 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
1085 u8 reserved2[2];
1086} __packed;
1087
1088#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
1089#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
1090#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
1091#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
1092#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1093
1094#define IL_DROP_SINGLE 0
1095#define IL_DROP_SELECTED 1
1096#define IL_DROP_ALL 2
1097
1098/*
1099 * REPLY_WEP_KEY = 0x20
1100 */
1101struct il_wep_key {
1102 u8 key_idx;
1103 u8 key_offset;
1104 u8 reserved1[2];
1105 u8 key_size;
1106 u8 reserved2[3];
1107 u8 key[16];
1108} __packed;
1109
1110struct il_wep_cmd {
1111 u8 num_keys;
1112 u8 global_key_type;
1113 u8 flags;
1114 u8 reserved;
1115 struct il_wep_key key[0];
1116} __packed;
1117
1118#define WEP_KEY_WEP_TYPE 1
1119#define WEP_KEYS_MAX 4
1120#define WEP_INVALID_OFFSET 0xff
1121#define WEP_KEY_LEN_64 5
1122#define WEP_KEY_LEN_128 13
1123
1124/******************************************************************************
1125 * (4)
1126 * Rx Responses:
1127 *
1128 *****************************************************************************/
1129
1130#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
1131#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
1132
1133#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
1134#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
1135#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
1136#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
1137#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70
1138#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
1139#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7)
1140
1141#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
1142#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
1143#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
1144#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
1145#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
1146#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8)
1147
1148#define RX_RES_STATUS_STATION_FOUND (1<<6)
1149#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
1150
1151#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
1152#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
1153#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
1154#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
1155#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
1156
1157#define RX_MPDU_RES_STATUS_ICV_OK (0x20)
1158#define RX_MPDU_RES_STATUS_MIC_OK (0x40)
1159#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1160#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
1161
1162struct il3945_rx_frame_stats {
1163 u8 phy_count;
1164 u8 id;
1165 u8 rssi;
1166 u8 agc;
1167 __le16 sig_avg;
1168 __le16 noise_diff;
1169 u8 payload[0];
1170} __packed;
1171
1172struct il3945_rx_frame_hdr {
1173 __le16 channel;
1174 __le16 phy_flags;
1175 u8 reserved1;
1176 u8 rate;
1177 __le16 len;
1178 u8 payload[0];
1179} __packed;
1180
1181struct il3945_rx_frame_end {
1182 __le32 status;
1183 __le64 timestamp;
1184 __le32 beacon_timestamp;
1185} __packed;
1186
1187/*
1188 * N_3945_RX = 0x1b (response only, not a command)
1189 *
1190 * NOTE: DO NOT dereference from casts to this structure
1191 * It is provided only for calculating minimum data set size.
1192 * The actual offsets of the hdr and end are dynamic based on
1193 * stats.phy_count
1194 */
1195struct il3945_rx_frame {
1196 struct il3945_rx_frame_stats stats;
1197 struct il3945_rx_frame_hdr hdr;
1198 struct il3945_rx_frame_end end;
1199} __packed;
1200
1201#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame))
1202
1203/* Fixed (non-configurable) rx data from phy */
1204
1205#define IL49_RX_RES_PHY_CNT 14
1206#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1207#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1208#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1209#define IL49_AGC_DB_POS (7)
1210struct il4965_rx_non_cfg_phy {
1211 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1212 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1213 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1214 u8 pad[0];
1215} __packed;
1216
1217/*
1218 * N_RX = 0xc3 (response only, not a command)
1219 * Used only for legacy (non 11n) frames.
1220 */
1221struct il_rx_phy_res {
1222 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1223 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1224 u8 stat_id; /* configurable DSP phy data set ID */
1225 u8 reserved1;
1226 __le64 timestamp; /* TSF at on air rise */
1227 __le32 beacon_time_stamp; /* beacon at on-air rise */
1228 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1229 __le16 channel; /* channel number */
1230 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1231 __le32 rate_n_flags; /* RATE_MCS_* */
1232 __le16 byte_count; /* frame's byte-count */
1233 __le16 frame_time; /* frame's time on the air */
1234} __packed;
1235
1236struct il_rx_mpdu_res_start {
1237 __le16 byte_count;
1238 __le16 reserved;
1239} __packed;
1240
1241/******************************************************************************
1242 * (5)
1243 * Tx Commands & Responses:
1244 *
1245 * Driver must place each C_TX command into one of the prioritized Tx
1246 * queues in host DRAM, shared between driver and device (see comments for
1247 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1248 * are preparing to transmit, the device pulls the Tx command over the PCI
1249 * bus via one of the device's Tx DMA channels, to fill an internal FIFO
1250 * from which data will be transmitted.
1251 *
1252 * uCode handles all timing and protocol related to control frames
1253 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1254 * handle reception of block-acks; uCode updates the host driver via
1255 * N_COMPRESSED_BA.
1256 *
1257 * uCode handles retrying Tx when an ACK is expected but not received.
1258 * This includes trying lower data rates than the one requested in the Tx
1259 * command, as set up by the C_RATE_SCALE (for 3945) or
1260 * C_TX_LINK_QUALITY_CMD (4965).
1261 *
1262 * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
1263 * This command must be executed after every RXON command, before Tx can occur.
1264 *****************************************************************************/
1265
1266/* C_TX Tx flags field */
1267
1268/*
1269 * 1: Use Request-To-Send protocol before this frame.
1270 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
1271 */
1272#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
1273
1274/*
1275 * 1: Transmit Clear-To-Send to self before this frame.
1276 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
1277 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
1278 */
1279#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
1280
1281/* 1: Expect ACK from receiving station
1282 * 0: Don't expect ACK (MAC header's duration field s/b 0)
1283 * Set this for unicast frames, but not broadcast/multicast. */
1284#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1285
1286/* For 4965 devices:
1287 * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
1288 * Tx command's initial_rate_idx indicates first rate to try;
1289 * uCode walks through table for additional Tx attempts.
1290 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
1291 * This rate will be used for all Tx attempts; it will not be scaled. */
1292#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
1293
1294/* 1: Expect immediate block-ack.
1295 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
1296#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
1297
1298/*
1299 * 1: Frame requires full Tx-Op protection.
1300 * Set this if either RTS or CTS Tx Flag gets set.
1301 */
1302#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
1303
1304/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
1305 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
1306#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
1307#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
1308#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
1309
1310/* 1: uCode overrides sequence control field in MAC header.
1311 * 0: Driver provides sequence control field in MAC header.
1312 * Set this for management frames, non-QOS data frames, non-unicast frames,
1313 * and also in Tx command embedded in C_SCAN for active scans. */
1314#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
1315
1316/* 1: This frame is non-last MPDU; more fragments are coming.
1317 * 0: Last fragment, or not using fragmentation. */
1318#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
1319
1320/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
1321 * 0: No TSF required in outgoing frame.
1322 * Set this for transmitting beacons and probe responses. */
1323#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
1324
1325/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
1326 * alignment of frame's payload data field.
1327 * 0: No pad
1328 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
1329 * field (but not both). Driver must align frame data (i.e. data following
1330 * MAC header) to DWORD boundary. */
1331#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
1332
1333/* accelerate aggregation support
1334 * 0 - no CCMP encryption; 1 - CCMP encryption */
1335#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
1336
1337/* HCCA-AP - disable duration overwriting. */
1338#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
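
/*
 * Illustrative sketch only (not part of the driver API): a minimal
 * tx_flags value for a unicast data frame, built from the flag comments
 * above - expect an ACK, and (on 4965) let uCode pick rates from the
 * station's rate-scale table. Frame-type specific flags (CTS-to-self,
 * TSF insertion, MH padding, etc.) would be OR-ed in as needed. The
 * helper name is hypothetical.
 */
static inline __le32 il_example_basic_tx_flags(bool unicast, bool use_rate_scale)
{
	__le32 flags = 0;

	if (unicast)
		flags |= TX_CMD_FLG_ACK_MSK;		/* expect ACK from peer */
	if (use_rate_scale)
		flags |= TX_CMD_FLG_STA_RATE_MSK;	/* 4965: use rate-scale table */
	return flags;
}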
1339
1340/*
1341 * TX command security control
1342 */
1343#define TX_CMD_SEC_WEP 0x01
1344#define TX_CMD_SEC_CCM 0x02
1345#define TX_CMD_SEC_TKIP 0x03
1346#define TX_CMD_SEC_MSK 0x03
1347#define TX_CMD_SEC_SHIFT 6
1348#define TX_CMD_SEC_KEY128 0x08
1349
1350/*
1351 * C_TX = 0x1c (command)
1352 */
1353
1354struct il3945_tx_cmd {
1355 /*
1356 * MPDU byte count:
1357 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1358 * + 8 byte IV for CCM or TKIP (not used for WEP)
1359 * + Data payload
1360 * + 8-byte MIC (not used for CCM/WEP)
1361 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1362	 *	MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1363 * Range: 14-2342 bytes.
1364 */
1365 __le16 len;
1366
1367 /*
1368 * MPDU or MSDU byte count for next frame.
1369 * Used for fragmentation and bursting, but not 11n aggregation.
1370 * Same as "len", but for next frame. Set to 0 if not applicable.
1371 */
1372 __le16 next_frame_len;
1373
1374 __le32 tx_flags; /* TX_CMD_FLG_* */
1375
1376 u8 rate;
1377
1378 /* Index of recipient station in uCode's station table */
1379 u8 sta_id;
1380 u8 tid_tspec;
1381 u8 sec_ctl;
1382 u8 key[16];
1383 union {
1384 u8 byte[8];
1385 __le16 word[4];
1386 __le32 dw[2];
1387 } tkip_mic;
1388 __le32 next_frame_info;
1389 union {
1390 __le32 life_time;
1391 __le32 attempt;
1392 } stop_time;
1393 u8 supp_rates[2];
1394 u8 rts_retry_limit; /*byte 50 */
1395 u8 data_retry_limit; /*byte 51 */
1396 union {
1397 __le16 pm_frame_timeout;
1398 __le16 attempt_duration;
1399 } timeout;
1400
1401 /*
1402 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1403 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1404 */
1405 __le16 driver_txop;
1406
1407 /*
1408 * MAC header goes here, followed by 2 bytes padding if MAC header
1409 * length is 26 or 30 bytes, followed by payload data
1410 */
1411 u8 payload[0];
1412 struct ieee80211_hdr hdr[0];
1413} __packed;
1414
1415/*
1416 * C_TX = 0x1c (response)
1417 */
1418struct il3945_tx_resp {
1419 u8 failure_rts;
1420 u8 failure_frame;
1421 u8 bt_kill_count;
1422 u8 rate;
1423 __le32 wireless_media_time;
1424 __le32 status; /* TX status */
1425} __packed;
1426
1427/*
1428 * 4965 uCode updates these Tx attempt count values in host DRAM.
1429 * Used for managing Tx retries when expecting block-acks.
1430 * Driver should set these fields to 0.
1431 */
1432struct il_dram_scratch {
1433 u8 try_cnt; /* Tx attempts */
1434 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1435 __le16 reserved;
1436} __packed;
1437
1438struct il_tx_cmd {
1439 /*
1440 * MPDU byte count:
1441 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1442 * + 8 byte IV for CCM or TKIP (not used for WEP)
1443 * + Data payload
1444 * + 8-byte MIC (not used for CCM/WEP)
1445 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1446	 *	MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1447 * Range: 14-2342 bytes.
1448 */
1449 __le16 len;
1450
1451 /*
1452 * MPDU or MSDU byte count for next frame.
1453 * Used for fragmentation and bursting, but not 11n aggregation.
1454 * Same as "len", but for next frame. Set to 0 if not applicable.
1455 */
1456 __le16 next_frame_len;
1457
1458 __le32 tx_flags; /* TX_CMD_FLG_* */
1459
1460 /* uCode may modify this field of the Tx command (in host DRAM!).
1461 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
1462 struct il_dram_scratch scratch;
1463
1464 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
1465 __le32 rate_n_flags; /* RATE_MCS_* */
1466
1467 /* Index of destination station in uCode's station table */
1468 u8 sta_id;
1469
1470 /* Type of security encryption: CCM or TKIP */
1471 u8 sec_ctl; /* TX_CMD_SEC_* */
1472
1473 /*
1474 * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
1475 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
1476 * data frames, this field may be used to selectively reduce initial
1477 * rate (via non-0 value) for special frames (e.g. management), while
1478 * still supporting rate scaling for all frames.
1479 */
1480 u8 initial_rate_idx;
1481 u8 reserved;
1482 u8 key[16];
1483 __le16 next_frame_flags;
1484 __le16 reserved2;
1485 union {
1486 __le32 life_time;
1487 __le32 attempt;
1488 } stop_time;
1489
1490 /* Host DRAM physical address pointer to "scratch" in this command.
1491 * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
1492 __le32 dram_lsb_ptr;
1493 u8 dram_msb_ptr;
1494
1495 u8 rts_retry_limit; /*byte 50 */
1496 u8 data_retry_limit; /*byte 51 */
1497 u8 tid_tspec;
1498 union {
1499 __le16 pm_frame_timeout;
1500 __le16 attempt_duration;
1501 } timeout;
1502
1503 /*
1504 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1505 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1506 */
1507 __le16 driver_txop;
1508
1509 /*
1510 * MAC header goes here, followed by 2 bytes padding if MAC header
1511 * length is 26 or 30 bytes, followed by payload data
1512 */
1513 u8 payload[0];
1514 struct ieee80211_hdr hdr[0];
1515} __packed;
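
/*
 * Illustrative sketch only: composing sec_ctl and key material from the
 * TX_CMD_SEC_* values defined earlier in this file. The helper names are
 * hypothetical; the WEP key placement at offset 3 follows the driver's
 * existing hw-crypto setup code.
 */
static inline void
il_example_fill_sec_ctl_ccmp(struct il_tx_cmd *tx_cmd, const u8 *key)
{
	tx_cmd->sec_ctl = TX_CMD_SEC_CCM;	/* 16-byte CCMP key, copied whole */
	memcpy(tx_cmd->key, key, 16);
}

static inline void
il_example_fill_sec_ctl_wep(struct il_tx_cmd *tx_cmd, int key_idx,
			    const u8 *key, int key_len)
{
	/* WEP key index is carried in the bits above TX_CMD_SEC_SHIFT */
	tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
			  ((key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
	if (key_len == 13)			/* WEP-104 keys set the 128-bit flag */
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
	memcpy(&tx_cmd->key[3], key, key_len);
}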
1516
1517/* TX command response is sent after *3945* transmission attempts.
1518 *
1519 * NOTES:
1520 *
1521 * TX_STATUS_FAIL_NEXT_FRAG
1522 *
1523 * If the fragment flag in the MAC header for the frame being transmitted
1524 * is set and there is insufficient time to transmit the next frame, the
1525 * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
1526 *
1527 * TX_STATUS_FIFO_UNDERRUN
1528 *
1529 * Indicates the host did not provide bytes to the FIFO fast enough while
1530 * a TX was in progress.
1531 *
1532 * TX_STATUS_FAIL_MGMNT_ABORT
1533 *
1534 * This status is only possible if the ABORT ON MGMT RX parameter was
1535 * set to true with the TX command.
1536 *
1537 * If the MSB of the status parameter is set then an abort sequence is
1538 * required. This sequence consists of the host activating the TX Abort
1539 * control line, and then waiting for the TX Abort command response. This
1540 * indicates that the device is no longer in a transmit state, and that the
1541 * command FIFO has been cleared. The host must then deactivate the TX Abort
1542 * control line. Receiving is still allowed in this case.
1543 */
1544enum {
1545 TX_3945_STATUS_SUCCESS = 0x01,
1546 TX_3945_STATUS_DIRECT_DONE = 0x02,
1547 TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
1548 TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
1549 TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1550 TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
1551 TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
1552 TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1553 TX_3945_STATUS_FAIL_DEST_PS = 0x88,
1554 TX_3945_STATUS_FAIL_ABORTED = 0x89,
1555 TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
1556 TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
1557 TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1558 TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
1559 TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
1560 TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1561 TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
1562 TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1563};
1564
1565/*
1566 * TX command response is sent after *4965* transmission attempts.
1567 *
1568 * Both postpone and abort statuses are expected behavior from uCode. No
1569 * special operation is required from the driver, except for RFKILL_FLUSH,
1570 * which requires a Tx flush host command to flush all the Tx frames in queues.
1571 */
1572enum {
1573 TX_STATUS_SUCCESS = 0x01,
1574 TX_STATUS_DIRECT_DONE = 0x02,
1575 /* postpone TX */
1576 TX_STATUS_POSTPONE_DELAY = 0x40,
1577 TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
1578 TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
1579 TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
1580 /* abort TX */
1581 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
1582 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
1583 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
1584 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1585 TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
1586 TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
1587 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1588 TX_STATUS_FAIL_DEST_PS = 0x88,
1589 TX_STATUS_FAIL_HOST_ABORTED = 0x89,
1590 TX_STATUS_FAIL_BT_RETRY = 0x8a,
1591 TX_STATUS_FAIL_STA_INVALID = 0x8b,
1592 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1593 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1594 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1595 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1596 TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
1597 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1598};
1599
1600#define TX_PACKET_MODE_REGULAR 0x0000
1601#define TX_PACKET_MODE_BURST_SEQ 0x0100
1602#define TX_PACKET_MODE_BURST_FIRST 0x0200
1603
1604enum {
1605 TX_POWER_PA_NOT_ACTIVE = 0x0,
1606};
1607
1608enum {
1609 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
1610 TX_STATUS_DELAY_MSK = 0x00000040,
1611 TX_STATUS_ABORT_MSK = 0x00000080,
1612 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
1613 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
1614 TX_RESERVED = 0x00780000, /* bits 19:22 */
1615 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
1616 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1617};
1618
1619/* *******************************
1620 * TX aggregation status
1621 ******************************* */
1622
1623enum {
1624 AGG_TX_STATE_TRANSMITTED = 0x00,
1625 AGG_TX_STATE_UNDERRUN_MSK = 0x01,
1626 AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
1627 AGG_TX_STATE_ABORT_MSK = 0x08,
1628 AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
1629 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
1630 AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
1631 AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
1632 AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
1633 AGG_TX_STATE_DUMP_TX_MSK = 0x200,
1634 AGG_TX_STATE_DELAY_TX_MSK = 0x400
1635};
1636
1637#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
1638#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
1639
1640#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
1641 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)
1642
1643/* # tx attempts for first frame in aggregation */
1644#define AGG_TX_STATE_TRY_CNT_POS 12
1645#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
1646
1647/* Command ID and sequence number of Tx command for this frame */
1648#define AGG_TX_STATE_SEQ_NUM_POS 16
1649#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
1650
1651/*
1652 * C_TX = 0x1c (response)
1653 *
1654 * This response may be in one of two slightly different formats, indicated
1655 * by the frame_count field:
1656 *
1657 * 1) No aggregation (frame_count == 1). This reports Tx results for
1658 * a single frame. Multiple attempts, at various bit rates, may have
1659 * been made for this frame.
1660 *
1661 * 2) Aggregation (frame_count > 1). This reports Tx results for
1662 * 2 or more frames that used block-acknowledge. All frames were
1663 * transmitted at same rate. Rate scaling may have been used if first
1664 * frame in this new agg block failed in previous agg block(s).
1665 *
1666 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
1667 * block-ack has not been received by the time the 4965 device records
1668 * this status.
1669 * This status relates to reasons the tx might have been blocked or aborted
1670 * within the sending station (this 4965 device), rather than whether it was
1671 * received successfully by the destination station.
1672 */
1673struct agg_tx_status {
1674 __le16 status;
1675 __le16 sequence;
1676} __packed;
1677
1678struct il4965_tx_resp {
1679 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1680 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1681 u8 failure_rts; /* # failures due to unsuccessful RTS */
1682 u8 failure_frame; /* # failures due to no ACK (unused for agg) */
1683
1684 /* For non-agg: Rate at which frame was successful.
1685 * For agg: Rate at which all frames were transmitted. */
1686 __le32 rate_n_flags; /* RATE_MCS_* */
1687
1688 /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
1689 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1690 __le16 wireless_media_time; /* uSecs */
1691
1692 __le16 reserved;
1693 __le32 pa_power1; /* RF power amplifier measurement (not used) */
1694 __le32 pa_power2;
1695
1696 /*
1697 * For non-agg: frame status TX_STATUS_*
1698 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
1699 * fields follow this one, up to frame_count.
1700 * Bit fields:
1701 * 11- 0: AGG_TX_STATE_* status code
1702 * 15-12: Retry count for 1st frame in aggregation (retries
1703 * occur if tx failed for this frame when it was a
1704 * member of a previous aggregation block). If rate
1705 * scaling is used, retry count indicates the rate
1706 * table entry used for all frames in the new agg.
1707 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1708 */
1709 union {
1710 __le32 status;
1711 struct agg_tx_status agg_status[0]; /* for each agg frame */
1712 } u;
1713} __packed;
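
/*
 * Illustrative sketch only: decoding the status word of a non-aggregated
 * il4965_tx_resp (frame_count == 1) using the TX_STATUS_* masks above.
 * For aggregation responses the same location holds agg_tx_status entries
 * instead. The helper name is hypothetical.
 */
static inline bool il_example_tx_frame_successful(struct il4965_tx_resp *resp)
{
	u32 status = le32_to_cpu(resp->u.status);

	/* Bits 0:7 carry the TX_STATUS_* code for this frame */
	return (status & TX_STATUS_MSK) == TX_STATUS_SUCCESS ||
	       (status & TX_STATUS_MSK) == TX_STATUS_DIRECT_DONE;
}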
1714
1715/*
1716 * N_COMPRESSED_BA = 0xc5 (response only, not a command)
1717 *
1718 * Reports Block-Acknowledge from recipient station
1719 */
1720struct il_compressed_ba_resp {
1721 __le32 sta_addr_lo32;
1722 __le16 sta_addr_hi16;
1723 __le16 reserved;
1724
1725 /* Index of recipient (BA-sending) station in uCode's station table */
1726 u8 sta_id;
1727 u8 tid;
1728 __le16 seq_ctl;
1729 __le64 bitmap;
1730 __le16 scd_flow;
1731 __le16 scd_ssn;
1732} __packed;
1733
1734/*
1735 * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
1736 *
1737 * See details under "TXPOWER" in 4965.h.
1738 */
1739
1740struct il3945_txpowertable_cmd {
1741 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1742 u8 reserved;
1743 __le16 channel;
1744 struct il3945_power_per_rate power[IL_MAX_RATES];
1745} __packed;
1746
1747struct il4965_txpowertable_cmd {
1748 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1749 u8 reserved;
1750 __le16 channel;
1751 struct il4965_tx_power_db tx_power;
1752} __packed;
1753
1754/**
1755 * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
1756 *
1757 * C_RATE_SCALE = 0x47 (command, has simple generic response)
1758 *
1759 * NOTE: The table of rates passed to the uCode via the
1760 * RATE_SCALE command sets up the corresponding order of
1761 * rates used for all related commands, including rate
1762 * masks, etc.
1763 *
1764 * For example, if you set 9MB (PLCP 0x0f) as the first
1765 * rate in the rate table, the bit mask for that rate
1766 * when passed through ofdm_basic_rates on the C_RXON
1767 * command would be bit 0 (1 << 0)
1768 */
1769struct il3945_rate_scaling_info {
1770 __le16 rate_n_flags;
1771 u8 try_cnt;
1772 u8 next_rate_idx;
1773} __packed;
1774
1775struct il3945_rate_scaling_cmd {
1776 u8 table_id;
1777 u8 reserved[3];
1778 struct il3945_rate_scaling_info table[IL_MAX_RATES];
1779} __packed;
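
/*
 * Illustrative sketch only: filling one entry of the 3945 rate-scaling
 * table described above. The PLCP value, retry count and next index are
 * caller-supplied examples, not the driver's defaults; the helper name
 * is hypothetical.
 */
static inline void
il3945_example_fill_rs_entry(struct il3945_rate_scaling_info *e,
			     u16 plcp_rate, u8 retries, u8 next_idx)
{
	e->rate_n_flags = cpu_to_le16(plcp_rate);	/* rate PLCP plus flags */
	e->try_cnt = retries;				/* attempts at this rate */
	e->next_rate_idx = next_idx;			/* entry to fall back to */
}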
1780
1781/* RS_NEW_API: only TLC_RTS remains and has moved to bit 0 */
1782#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1783
1784/* # of EDCA prioritized tx fifos */
1785#define LINK_QUAL_AC_NUM AC_NUM
1786
1787/* # entries in rate scale table to support Tx retries */
1788#define LINK_QUAL_MAX_RETRY_NUM 16
1789
1790/* Tx antenna selection values */
1791#define LINK_QUAL_ANT_A_MSK (1 << 0)
1792#define LINK_QUAL_ANT_B_MSK (1 << 1)
1793#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1794
1795/**
1796 * struct il_link_qual_general_params
1797 *
1798 * Used in C_TX_LINK_QUALITY_CMD
1799 */
1800struct il_link_qual_general_params {
1801 u8 flags;
1802
1803 /* No entries at or above this (driver chosen) idx contain MIMO */
1804 u8 mimo_delimiter;
1805
1806 /* Best single antenna to use for single stream (legacy, SISO). */
1807 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
1808
1809 /* Best antennas to use for MIMO (unused for 4965, assumes both). */
1810 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
1811
1812 /*
1813 * If driver needs to use different initial rates for different
1814 * EDCA QOS access categories (as implemented by tx fifos 0-3),
1815 * this table will set that up, by indicating the idxes in the
1816 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
1817 * Otherwise, driver should set all entries to 0.
1818 *
1819 * Entry usage:
1820 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
1821 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
1822 */
1823 u8 start_rate_idx[LINK_QUAL_AC_NUM];
1824} __packed;
1825
1826#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
1827#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
1828#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
1829
1830#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
1831#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
1832#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
1833
1834#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31)
1835#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
1836#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
1837
1838/**
1839 * struct il_link_qual_agg_params
1840 *
1841 * Used in C_TX_LINK_QUALITY_CMD
1842 */
1843struct il_link_qual_agg_params {
1844
1845 /*
1846	 * Maximum number of uSec in aggregation.
1847	 * Default is 4000 (4 milliseconds) if not configured in .cfg.
1848 */
1849 __le16 agg_time_limit;
1850
1851 /*
1852 * Number of Tx retries allowed for a frame, before that frame will
1853 * no longer be considered for the start of an aggregation sequence
1854 * (scheduler will then try to tx it as single frame).
1855 * Driver should set this to 3.
1856 */
1857 u8 agg_dis_start_th;
1858
1859 /*
1860 * Maximum number of frames in aggregation.
1861 * 0 = no limit (default). 1 = no aggregation.
1862 * Other values = max # frames in aggregation.
1863 */
1864 u8 agg_frame_cnt_limit;
1865
1866 __le32 reserved;
1867} __packed;
1868
1869/*
1870 * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1871 *
1872 * For 4965 devices only; 3945 uses C_RATE_SCALE.
1873 *
1874 * Each station in the 4965 device's internal station table has its own
1875 * table of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO)
1876 * for retrying Tx when
1877 * an ACK is not received. This command replaces the entire table for
1878 * one station.
1879 *
1880 * NOTE: Station must already be in 4965 device's station table.
1881 * Use C_ADD_STA.
1882 *
1883 * The rate scaling procedures described below work well. Of course, other
1884 * procedures are possible, and may work better for particular environments.
1885 *
1886 *
1887 * FILLING THE RATE TBL
1888 *
1889 * Given a particular initial rate and mode, as determined by the rate
1890 * scaling algorithm described below, the Linux driver uses the following
1891 * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
1892 * Link Quality command:
1893 *
1894 *
1895 * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
1896 * a) Use this same initial rate for first 3 entries.
1897 * b) Find next lower available rate using same mode (SISO or MIMO),
1898 * use for next 3 entries. If no lower rate available, switch to
1899 * legacy mode (no HT40 channel, no MIMO, no short guard interval).
1900 * c) If using MIMO, set command's mimo_delimiter to number of entries
1901 * using MIMO (3 or 6).
1902 * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
1903 * no MIMO, no short guard interval), at the next lower bit rate
1904 * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
1905 * legacy procedure for remaining table entries.
1906 *
1907 * 2) If using legacy initial rate:
1908 * a) Use the initial rate for only one entry.
1909 * b) For each following entry, reduce the rate to next lower available
1910 * rate, until reaching the lowest available rate.
1911 * c) When reducing rate, also switch antenna selection.
1912 * d) Once lowest available rate is reached, repeat this rate until
1913 * rate table is filled (16 entries), switching antenna each entry.
1914 *
1915 *
1916 * ACCUMULATING HISTORY
1917 *
1918 * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
1919 * uses two sets of frame Tx success history: One for the current/active
1920 * modulation mode, and one for a speculative/search mode that is being
1921 * attempted. If the speculative mode turns out to be more effective (i.e.
1922 * actual transfer rate is better), then the driver continues to use the
1923 * speculative mode as the new current active mode.
1924 *
1925 * Each history set contains, separately for each possible rate, data for a
1926 * sliding win of the 62 most recent tx attempts at that rate. The data
1927 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1928 * and attempted frames, from which the driver can additionally calculate a
1929 * success ratio (success / attempted) and number of failures
1930 * (attempted - success), and control the size of the win (attempted).
1931 * The driver uses the bit map to remove successes from the success sum, as
1932 * the oldest tx attempts fall out of the win.
1933 *
1934 * When the 4965 device makes multiple tx attempts for a given frame, each
1935 * attempt might be at a different rate, and have different modulation
1936 * characteristics (e.g. antenna, fat channel, short guard interval), as set
1937 * up in the rate scaling table in the Link Quality command. The driver must
1938 * determine which rate table entry was used for each tx attempt, to determine
1939 * which rate-specific history to update, and record only those attempts that
1940 * match the modulation characteristics of the history set.
1941 *
1942 * When using block-ack (aggregation), all frames are transmitted at the same
1943 * rate, since there is no per-attempt acknowledgment from the destination
1944 * station. The Tx response struct il_tx_resp indicates the Tx rate in
1945 * rate_n_flags field. After receiving a block-ack, the driver can update
1946 * history for the entire block all at once.
1947 *
1948 *
1949 * FINDING BEST STARTING RATE:
1950 *
1951 * When working with a selected initial modulation mode (see below), the
1952 * driver attempts to find a best initial rate. The initial rate is the
1953 * first entry in the Link Quality command's rate table.
1954 *
1955 * 1) Calculate actual throughput (success ratio * expected throughput, see
1956 * table below) for current initial rate. Do this only if enough frames
1957 * have been attempted to make the value meaningful: at least 6 failed
1958 * tx attempts, or at least 8 successes. If not enough, don't try rate
1959 * scaling yet.
1960 *
1961 * 2) Find available rates adjacent to current initial rate. Available means:
1962 * a) supported by hardware &&
1963 * b) supported by association &&
1964 * c) within any constraints selected by user
1965 *
1966 * 3) Gather measured throughputs for adjacent rates. These might not have
1967 * enough history to calculate a throughput. That's okay, we might try
1968 * using one of them anyway!
1969 *
1970 * 4) Try decreasing rate if, for current rate:
1971 * a) success ratio is < 15% ||
1972 * b) lower adjacent rate has better measured throughput ||
1973 * c) higher adjacent rate has worse throughput, and lower is unmeasured
1974 *
1975 * As a sanity check, if decrease was determined above, leave rate
1976 * unchanged if:
1977 * a) lower rate unavailable
1978 * b) success ratio at current rate > 85% (very good)
1979 * c) current measured throughput is better than expected throughput
1980 * of lower rate (under perfect 100% tx conditions, see table below)
1981 *
1982 * 5) Try increasing rate if, for current rate:
1983 * a) success ratio is < 15% ||
1984 * b) both adjacent rates' throughputs are unmeasured (try it!) ||
1985 *     c) higher adjacent rate has better measured throughput ||
1986 *     d) lower adjacent rate has worse throughput, and higher is unmeasured
1987 *
1988 * As a sanity check, if increase was determined above, leave rate
1989 * unchanged if:
1990 * a) success ratio at current rate < 70%. This is not particularly
1991 * good performance; higher rate is sure to have poorer success.
1992 *
1993 * 6) Re-evaluate the rate after each tx frame. If working with block-
1994 * acknowledge, history and stats may be calculated for the entire
1995 * block (including prior history that fits within the history wins),
1996 * before re-evaluation.
1997 *
1998 * FINDING BEST STARTING MODULATION MODE:
1999 *
2000 * After working with a modulation mode for a "while" (and doing rate scaling),
2001 * the driver searches for a new initial mode in an attempt to improve
2002 * throughput. The "while" is measured by numbers of attempted frames:
2003 *
2004 * For legacy mode, search for new mode after:
2005 * 480 successful frames, or 160 failed frames
2006 * For high-throughput modes (SISO or MIMO), search for new mode after:
2007 * 4500 successful frames, or 400 failed frames
2008 *
2009 * Mode switch possibilities are (3 for each mode):
2010 *
2011 * For legacy:
2012 * Change antenna, try SISO (if HT association), try MIMO (if HT association)
2013 * For SISO:
2014 * Change antenna, try MIMO, try shortened guard interval (SGI)
2015 * For MIMO:
2016 * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
2017 *
2018 * When trying a new mode, use the same bit rate as the old/current mode when
2019 * trying antenna switches and shortened guard interval. When switching to
2020 * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
2021 * for which the expected throughput (under perfect conditions) is about the
2022 * same or slightly better than the actual measured throughput delivered by
2023 * the old/current mode.
2024 *
2025 * Actual throughput can be estimated by multiplying the expected throughput
2026 * by the success ratio (successful / attempted tx frames). Frame size is
2027 * not considered in this calculation; it assumes that frame size will average
2028 * out to be fairly consistent over several samples. The following are
2029 * metric values for expected throughput assuming 100% success ratio.
2030 * Only G band has support for CCK rates:
2031 *
2032 * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
2033 *
2034 * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
2035 * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
2036 * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
2037 * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
2038 * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
2039 * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
2040 * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
2041 * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
2042 * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
2043 * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
2044 *
2045 * After the new mode has been tried for a short while (minimum of 6 failed
2046 * frames or 8 successful frames), compare success ratio and actual throughput
2047 * estimate of the new mode with the old. If either is better with the new
2048 * mode, continue to use the new mode.
2049 *
2050 * Continue comparing modes until all 3 possibilities have been tried.
2051 * If moving from legacy to HT, try all 3 possibilities from the new HT
2052 * mode. After trying all 3, a best mode is found. Continue to use this mode
2053 * for the longer "while" described above (e.g. 480 successful frames for
2054 * legacy), and then repeat the search process.
2055 *
2056 */
2057struct il_link_quality_cmd {
2058
2059 /* Index of destination/recipient station in uCode's station table */
2060 u8 sta_id;
2061 u8 reserved1;
2062 __le16 control; /* not used */
2063 struct il_link_qual_general_params general_params;
2064 struct il_link_qual_agg_params agg_params;
2065
2066 /*
2067 * Rate info; when using rate-scaling, Tx command's initial_rate_idx
2068 * specifies 1st Tx rate attempted, via idx into this table.
2069 * 4965 devices work their way through the table when retrying Tx.
2070 */
2071 struct {
2072 __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */
2073 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
2074 __le32 reserved2;
2075} __packed;
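
/*
 * Illustrative sketch only (assumptions noted): filling the Link Quality
 * command's rs_table for a legacy initial rate, following the "FILLING
 * THE RATE TBL" procedure above - one entry at the initial rate, then
 * step down through lower rates, repeating the lowest until all 16
 * entries are filled. The next_lower_rate() callback stands in for the
 * driver's real rate tables and is a hypothetical placeholder.
 */
static inline void
il_example_fill_legacy_rs_table(struct il_link_quality_cmd *lq,
				u32 initial_rate_n_flags,
				u32 (*next_lower_rate)(u32))
{
	u32 r = initial_rate_n_flags;
	int i;

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		lq->rs_table[i].rate_n_flags = cpu_to_le32(r);
		/* assumed to return the same value once the lowest rate is reached */
		r = next_lower_rate(r);
	}
}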
2076
2077/*
2078 * BT configuration enable flags:
2079 * bit 0 - 1: BT channel announcement enabled
2080 * 0: disable
2081 * bit 1 - 1: priority of BT device enabled
2082 * 0: disable
2083 */
2084#define BT_COEX_DISABLE (0x0)
2085#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
2086#define BT_ENABLE_PRIORITY BIT(1)
2087
2088#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
2089
2090#define BT_LEAD_TIME_DEF (0x1E)
2091
2092#define BT_MAX_KILL_DEF (0x5)
2093
2094/*
2095 * C_BT_CONFIG = 0x9b (command, has simple generic response)
2096 *
2097 * 3945 and 4965 devices support hardware handshake with Bluetooth device on
2098 * same platform. Bluetooth device alerts wireless device when it will Tx;
2099 * wireless device can delay or kill its own Tx to accommodate.
2100 */
2101struct il_bt_cmd {
2102 u8 flags;
2103 u8 lead_time;
2104 u8 max_kill;
2105 u8 reserved;
2106 __le32 kill_ack_mask;
2107 __le32 kill_cts_mask;
2108} __packed;
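
/*
 * Illustrative sketch only: a typical C_BT_CONFIG payload built from the
 * defaults above. Enabling coexistence sets both the channel-announce and
 * priority bits; how the command is queued to the device is driver code
 * and not shown here. The helper name is hypothetical.
 */
static inline void il_example_init_bt_cmd(struct il_bt_cmd *bt, bool coex_active)
{
	memset(bt, 0, sizeof(*bt));
	bt->flags = coex_active ? BT_COEX_ENABLE : BT_COEX_DISABLE;
	bt->lead_time = BT_LEAD_TIME_DEF;
	bt->max_kill = BT_MAX_KILL_DEF;
	/* kill_ack_mask and kill_cts_mask left at 0 */
}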
2109
2110/******************************************************************************
2111 * (6)
2112 * Spectrum Management (802.11h) Commands, Responses, Notifications:
2113 *
2114 *****************************************************************************/
2115
2116/*
2117 * Spectrum Management
2118 */
2119#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
2120 RXON_FILTER_CTL2HOST_MSK | \
2121 RXON_FILTER_ACCEPT_GRP_MSK | \
2122 RXON_FILTER_DIS_DECRYPT_MSK | \
2123 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
2124 RXON_FILTER_ASSOC_MSK | \
2125 RXON_FILTER_BCON_AWARE_MSK)
2126
2127struct il_measure_channel {
2128 __le32 duration; /* measurement duration in extended beacon
2129 * format */
2130 u8 channel; /* channel to measure */
2131 u8 type; /* see enum il_measure_type */
2132 __le16 reserved;
2133} __packed;
2134
2135/*
2136 * C_SPECTRUM_MEASUREMENT = 0x74 (command)
2137 */
2138struct il_spectrum_cmd {
2139 __le16 len; /* number of bytes starting from token */
2140 u8 token; /* token id */
2141 u8 id; /* measurement id -- 0 or 1 */
2142 u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */
2143 u8 periodic; /* 1 = periodic */
2144 __le16 path_loss_timeout;
2145 __le32 start_time; /* start time in extended beacon format */
2146 __le32 reserved2;
2147 __le32 flags; /* rxon flags */
2148 __le32 filter_flags; /* rxon filter flags */
2149 __le16 channel_count; /* minimum 1, maximum 10 */
2150 __le16 reserved3;
2151 struct il_measure_channel channels[10];
2152} __packed;
2153
2154/*
2155 * C_SPECTRUM_MEASUREMENT = 0x74 (response)
2156 */
2157struct il_spectrum_resp {
2158 u8 token;
2159 u8 id; /* id of the prior command replaced, or 0xff */
2160 __le16 status; /* 0 - command will be handled
2161 * 1 - cannot handle (conflicts with another
2162 * measurement) */
2163} __packed;
2164
2165enum il_measurement_state {
2166 IL_MEASUREMENT_START = 0,
2167 IL_MEASUREMENT_STOP = 1,
2168};
2169
2170enum il_measurement_status {
2171 IL_MEASUREMENT_OK = 0,
2172 IL_MEASUREMENT_CONCURRENT = 1,
2173 IL_MEASUREMENT_CSA_CONFLICT = 2,
2174 IL_MEASUREMENT_TGH_CONFLICT = 3,
2175 /* 4-5 reserved */
2176 IL_MEASUREMENT_STOPPED = 6,
2177 IL_MEASUREMENT_TIMEOUT = 7,
2178 IL_MEASUREMENT_PERIODIC_FAILED = 8,
2179};
2180
2181#define NUM_ELEMENTS_IN_HISTOGRAM 8
2182
2183struct il_measurement_histogram {
2184 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2185 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2186} __packed;
2187
2188/* clear channel availability counters */
2189struct il_measurement_cca_counters {
2190 __le32 ofdm;
2191 __le32 cck;
2192} __packed;
2193
2194enum il_measure_type {
2195 IL_MEASURE_BASIC = (1 << 0),
2196 IL_MEASURE_CHANNEL_LOAD = (1 << 1),
2197 IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
2198 IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
2199 IL_MEASURE_FRAME = (1 << 4),
2200 /* bits 5:6 are reserved */
2201 IL_MEASURE_IDLE = (1 << 7),
2202};
2203
2204/*
2205 * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
2206 */
2207struct il_spectrum_notification {
2208 u8 id; /* measurement id -- 0 or 1 */
2209 u8 token;
2210 u8 channel_idx; /* idx in measurement channel list */
2211 u8 state; /* 0 - start, 1 - stop */
2212 __le32 start_time; /* lower 32-bits of TSF */
2213 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
2214 u8 channel;
2215 u8 type; /* see enum il_measurement_type */
2216 u8 reserved1;
2217	/* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only
2218 * valid if applicable for measurement type requested. */
2219	__le32 cca_ofdm;	/* cca fraction time in 40MHz clock periods */
2220	__le32 cca_cck;		/* cca fraction time in 44MHz clock periods */
2221 __le32 cca_time; /* channel load time in usecs */
2222 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
2223 * unidentified */
2224 u8 reserved2[3];
2225 struct il_measurement_histogram histogram;
2226 __le32 stop_time; /* lower 32-bits of TSF */
2227 __le32 status; /* see il_measurement_status */
2228} __packed;
2229
2230/******************************************************************************
2231 * (7)
2232 * Power Management Commands, Responses, Notifications:
2233 *
2234 *****************************************************************************/
2235
2236/**
2237 * struct il_powertable_cmd - Power Table Command
2238 * @flags: See below:
2239 *
2240 * C_POWER_TBL = 0x77 (command, has simple generic response)
2241 *
2242 * PM allow:
2243 *   bit 0 - '0' Driver does not allow power management
2244 *           '1' Driver allows PM (use rest of parameters)
2245 *
2246 * uCode sends sleep notifications:
2247 *   bit 1 - '0' Don't send sleep notification
2248 *           '1' Send sleep notification (SEND_PM_NOTIFICATION)
2249 *
2250 * Sleep over DTIM
2251 *   bit 2 - '0' PM has to wake up at every DTIM
2252 *           '1' PM may sleep over DTIM until the listen interval.
2253 *
2254 * PCI power managed
2255 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2256 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2257 *
2258 * Fast PD
2259 * bit 4 - '1' Put radio to sleep when receiving frame for others
2260 *
2261 * Force sleep Modes
2262 * bit 31/30- '00' use both mac/xtal sleeps
2263 * '01' force Mac sleep
2264 * '10' force xtal sleep
2265 * '11' Illegal set
2266 *
2267 * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
2268 * uCode assumes sleep over DTIM is allowed and we don't need to wake up
2269 * for every DTIM.
2270 */
2271#define IL_POWER_VEC_SIZE 5
2272
2273#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2274#define IL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
2275#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2276
2277struct il3945_powertable_cmd {
2278 __le16 flags;
2279 u8 reserved[2];
2280 __le32 rx_data_timeout;
2281 __le32 tx_data_timeout;
2282 __le32 sleep_interval[IL_POWER_VEC_SIZE];
2283} __packed;
2284
2285struct il_powertable_cmd {
2286 __le16 flags;
2287 u8 keep_alive_seconds; /* 3945 reserved */
2288 u8 debug_flags; /* 3945 reserved */
2289 __le32 rx_data_timeout;
2290 __le32 tx_data_timeout;
2291 __le32 sleep_interval[IL_POWER_VEC_SIZE];
2292 __le32 keep_alive_beacons;
2293} __packed;
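
/*
 * Illustrative sketch only: setting the power table flags described in
 * the comment block above. In the real driver the PCI power-management
 * state is read from PCI config space; here it is passed in as a plain
 * boolean, and the helper name is hypothetical.
 */
static inline void
il_example_set_power_flags(struct il_powertable_cmd *cmd, bool allow_sleep,
			   bool sleep_over_dtim, bool pci_pm)
{
	cmd->flags = 0;
	if (allow_sleep)
		cmd->flags |= IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	if (sleep_over_dtim)
		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
	if (pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;
}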
2294
2295/*
2296 * N_PM_SLEEP = 0x7A (notification only, not a command)
2297 * all devices identical.
2298 */
2299struct il_sleep_notification {
2300 u8 pm_sleep_mode;
2301 u8 pm_wakeup_src;
2302 __le16 reserved;
2303 __le32 sleep_time;
2304 __le32 tsf_low;
2305 __le32 bcon_timer;
2306} __packed;
2307
2308/* Sleep states. all devices identical. */
2309enum {
2310 IL_PM_NO_SLEEP = 0,
2311 IL_PM_SLP_MAC = 1,
2312 IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
2313 IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
2314 IL_PM_SLP_PHY = 4,
2315 IL_PM_SLP_REPENT = 5,
2316 IL_PM_WAKEUP_BY_TIMER = 6,
2317 IL_PM_WAKEUP_BY_DRIVER = 7,
2318 IL_PM_WAKEUP_BY_RFKILL = 8,
2319 /* 3 reserved */
2320 IL_PM_NUM_OF_MODES = 12,
2321};
2322
2323/*
2324 * N_CARD_STATE = 0xa1 (notification only, not a command)
2325 */
2326struct il_card_state_notif {
2327 __le32 flags;
2328} __packed;
2329
2330#define HW_CARD_DISABLED 0x01
2331#define SW_CARD_DISABLED 0x02
2332#define CT_CARD_DISABLED 0x04
2333#define RXON_CARD_DISABLED 0x10
2334
2335struct il_ct_kill_config {
2336 __le32 reserved;
2337 __le32 critical_temperature_M;
2338 __le32 critical_temperature_R;
2339} __packed;
2340
2341/******************************************************************************
2342 * (8)
2343 * Scan Commands, Responses, Notifications:
2344 *
2345 *****************************************************************************/
2346
2347#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
2348#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2349
2350/**
2351 * struct il_scan_channel - entry in C_SCAN channel table
2352 *
2353 * One for each channel in the scan list.
2354 * Each channel can independently select:
2355 * 1) SSID for directed active scans
2356 * 2) Txpower setting (for rate specified within Tx command)
2357 * 3) How long to stay on-channel (behavior may be modified by quiet_time,
2358 * quiet_plcp_th, good_CRC_th)
2359 *
2360 * To avoid uCode errors, make sure the following are true (see comments
2361 * under struct il_scan_cmd about max_out_time and quiet_time):
2362 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2363 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2364 * 2) quiet_time <= active_dwell
2365 * 3) If restricting off-channel time (i.e. max_out_time !=0):
2366 * passive_dwell < max_out_time
2367 * active_dwell < max_out_time
2368 */
2369struct il3945_scan_channel {
2370 /*
2371 * type is defined as:
2372 * 0:0 1 = active, 0 = passive
2373 * 1:4 SSID direct bit map; if a bit is set, then corresponding
2374 * SSID IE is transmitted in probe request.
2375 * 5:7 reserved
2376 */
2377 u8 type;
2378 u8 channel; /* band is selected by il3945_scan_cmd "flags" field */
2379 struct il3945_tx_power tpc;
2380 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2381 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2382} __packed;
2383
2384/* Set number of direct probes in the u8 'type' field */
2385#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2386
2387struct il_scan_channel {
2388 /*
2389 * type is defined as:
2390 * 0:0 1 = active, 0 = passive
2391 * 1:20 SSID direct bit map; if a bit is set, then corresponding
2392 * SSID IE is transmitted in probe request.
2393 * 21:31 reserved
2394 */
2395 __le32 type;
2396 __le16 channel; /* band is selected by il_scan_cmd "flags" field */
2397 u8 tx_gain; /* gain for analog radio */
2398 u8 dsp_atten; /* gain for DSP */
2399 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2400 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2401} __packed;
2402
2403/* Set number of direct probes in the __le32 'type' field */
2404#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
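
/*
 * Illustrative sketch only: composing the __le32 'type' field of
 * struct il_scan_channel. Bit 0 selects active scanning, and
 * IL_SCAN_PROBE_MASK(n) sets SSID-direct bits 1..n, i.e. the first n
 * entries of the direct_scan[] table are probed on this channel. The
 * helper name is hypothetical.
 */
static inline __le32 il_example_scan_channel_type(bool active, int n_probes)
{
	__le32 type = active ? SCAN_CHANNEL_TYPE_ACTIVE : SCAN_CHANNEL_TYPE_PASSIVE;

	if (active && n_probes)
		type |= IL_SCAN_PROBE_MASK(n_probes);
	return type;
}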
2405
2406/**
2407 * struct il_ssid_ie - directed scan network information element
2408 *
2409 * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
2410 * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
2411 * each channel may select different ssids from among the 20 (4) entries.
2412 * SSID IEs get transmitted in reverse order of entry.
2413 */
2414struct il_ssid_ie {
2415 u8 id;
2416 u8 len;
2417 u8 ssid[32];
2418} __packed;
2419
2420#define PROBE_OPTION_MAX_3945 4
2421#define PROBE_OPTION_MAX 20
2422#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2423#define IL_GOOD_CRC_TH_DISABLED 0
2424#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2425#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2426#define IL_MAX_SCAN_SIZE 1024
2427#define IL_MAX_CMD_SIZE 4096
2428
2429/*
2430 * C_SCAN = 0x80 (command)
2431 *
2432 * The hardware scan command is very powerful; the driver can set it up to
2433 * maintain (relatively) normal network traffic while doing a scan in the
2434 * background. The max_out_time and suspend_time control the ratio of how
2435 * long the device stays on an associated network channel ("service channel")
2436 * vs. how long it's away from the service channel, i.e. tuned to other channels
2437 * for scanning.
2438 *
2439 * max_out_time is the max time off-channel (in usec), and suspend_time
2440 * is how long (in "extended beacon" format) that the scan is "suspended"
2441 * after returning to the service channel. That is, suspend_time is the
2442 * time that we stay on the service channel, doing normal work, between
2443 * scan segments. The driver may set these parameters differently to support
2444 * scanning when associated vs. not associated, and light vs. heavy traffic
2445 * loads when associated.
2446 *
2447 * After receiving this command, the device's scan engine does the following:
2448 *
2449 * 1) Sends SCAN_START notification to driver
2450 * 2) Checks to see if it has time to do scan for one channel
2451 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
2452 * to tell AP that we're going off-channel
2453 * 4) Tunes to first channel in scan list, does active or passive scan
2454 * 5) Sends SCAN_RESULT notification to driver
2455 * 6) Checks to see if it has time to do scan on *next* channel in list
2456 * 7) Repeats 4-6 until it no longer has time to scan the next channel
2457 * before max_out_time expires
2458 * 8) Returns to service channel
2459 * 9) Sends NULL packet with PS=0 to tell AP that we're back
2460 * 10) Stays on service channel until suspend_time expires
2461 * 11) Repeats entire process 2-10 until list is complete
2462 * 12) Sends SCAN_COMPLETE notification
2463 *
2464 * For fast, efficient scans, the scan command also has support for staying on
2465 * a channel for just a short time, if doing active scanning and getting no
2466 * responses to the transmitted probe request. This time is controlled by
2467 * quiet_time, and the number of received packets below which a channel is
2468 * considered "quiet" is controlled by quiet_plcp_threshold.
2469 *
2470 * For active scanning on channels that have regulatory restrictions against
2471 * blindly transmitting, the scan can listen before transmitting, to make sure
2472 * that there is already legitimate activity on the channel. If enough
2473 * packets are cleanly received on the channel (controlled by good_CRC_th,
2474 * typical value 1), the scan engine starts transmitting probe requests.
2475 *
2476 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2477 *
2478 * To avoid uCode errors, see timing restrictions described under
2479 * struct il_scan_channel.
2480 */
2481
2482struct il3945_scan_cmd {
2483 __le16 len;
2484 u8 reserved0;
2485 u8 channel_count; /* # channels in channel list */
2486 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2487 * (only for active scan) */
2488 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2489 __le16 good_CRC_th; /* passive -> active promotion threshold */
2490 __le16 reserved1;
2491 __le32 max_out_time; /* max usec to be away from associated (service)
2492 * channel */
2493 __le32 suspend_time; /* pause scan this long (in "extended beacon
2494 * format") when returning to service channel:
2495 * 3945; 31:24 # beacons, 19:0 additional usec,
2496 * 4965; 31:22 # beacons, 21:0 additional usec.
2497 */
2498 __le32 flags; /* RXON_FLG_* */
2499 __le32 filter_flags; /* RXON_FILTER_* */
2500
2501 /* For active scans (set to all-0s for passive scans).
2502 * Does not include payload. Must specify Tx rate; no rate scaling. */
2503 struct il3945_tx_cmd tx_cmd;
2504
2505 /* For directed active scans (set to all-0s otherwise) */
2506 struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
2507
2508 /*
2509 * Probe request frame, followed by channel list.
2510 *
2511 * Size of probe request frame is specified by byte count in tx_cmd.
2512 * Channel list follows immediately after probe request frame.
2513 * Number of channels in list is specified by channel_count.
2514 * Each channel in list is of type:
2515 *
2516 * struct il3945_scan_channel channels[0];
2517 *
2518 * NOTE: Only one band of channels can be scanned per pass. You
2519 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2520 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
2521 * before requesting another scan.
2522 */
2523 u8 data[0];
2524} __packed;
2525
2526struct il_scan_cmd {
2527 __le16 len;
2528 u8 reserved0;
2529 u8 channel_count; /* # channels in channel list */
2530 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2531 * (only for active scan) */
2532 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2533 __le16 good_CRC_th; /* passive -> active promotion threshold */
2534 __le16 rx_chain; /* RXON_RX_CHAIN_* */
2535 __le32 max_out_time; /* max usec to be away from associated (service)
2536 * channel */
2537 __le32 suspend_time; /* pause scan this long (in "extended beacon
2538 * format") when returning to service chnl:
2539 * 3945; 31:24 # beacons, 19:0 additional usec,
2540 * 4965; 31:22 # beacons, 21:0 additional usec.
2541 */
2542 __le32 flags; /* RXON_FLG_* */
2543 __le32 filter_flags; /* RXON_FILTER_* */
2544
2545 /* For active scans (set to all-0s for passive scans).
2546 * Does not include payload. Must specify Tx rate; no rate scaling. */
2547 struct il_tx_cmd tx_cmd;
2548
2549 /* For directed active scans (set to all-0s otherwise) */
2550 struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
2551
2552 /*
2553 * Probe request frame, followed by channel list.
2554 *
2555 * Size of probe request frame is specified by byte count in tx_cmd.
2556 * Channel list follows immediately after probe request frame.
2557 * Number of channels in list is specified by channel_count.
2558 * Each channel in list is of type:
2559 *
2560 * struct il_scan_channel channels[0];
2561 *
2562 * NOTE: Only one band of channels can be scanned per pass. You
2563 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2564 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
2565 * before requesting another scan.
2566 */
2567 u8 data[0];
2568} __packed;
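
/*
 * Illustrative sketch only: locating the channel list inside a C_SCAN
 * buffer laid out as described above - the probe request frame sits at
 * cmd->data, and the channel list starts immediately after it.
 * probe_req_len is the byte count the caller placed in tx_cmd; the
 * helper name is hypothetical.
 */
static inline struct il_scan_channel *
il_example_scan_channel_list(struct il_scan_cmd *cmd, size_t probe_req_len)
{
	return (struct il_scan_channel *)(cmd->data + probe_req_len);
}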
2569
2570/* A scan abort is reported by the complete notification with abort status. */
2571#define CAN_ABORT_STATUS cpu_to_le32(0x1)
2572/* complete notification statuses */
2573#define ABORT_STATUS 0x2
2574
2575/*
2576 * C_SCAN = 0x80 (response)
2577 */
2578struct il_scanreq_notification {
2579 __le32 status; /* 1: okay, 2: cannot fulfill request */
2580} __packed;
2581
2582/*
2583 * N_SCAN_START = 0x82 (notification only, not a command)
2584 */
2585struct il_scanstart_notification {
2586 __le32 tsf_low;
2587 __le32 tsf_high;
2588 __le32 beacon_timer;
2589 u8 channel;
2590 u8 band;
2591 u8 reserved[2];
2592 __le32 status;
2593} __packed;
2594
2595#define SCAN_OWNER_STATUS 0x1
2596#define MEASURE_OWNER_STATUS 0x2
2597
2598#define IL_PROBE_STATUS_OK 0
2599#define IL_PROBE_STATUS_TX_FAILED BIT(0)
2600/* error statuses combined with TX_FAILED */
2601#define IL_PROBE_STATUS_FAIL_TTL BIT(1)
2602#define IL_PROBE_STATUS_FAIL_BT BIT(2)
2603
2604#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */
2605/*
2606 * N_SCAN_RESULTS = 0x83 (notification only, not a command)
2607 */
2608struct il_scanresults_notification {
2609 u8 channel;
2610 u8 band;
2611 u8 probe_status;
2612 u8 num_probe_not_sent; /* not enough time to send */
2613 __le32 tsf_low;
2614 __le32 tsf_high;
2615 __le32 stats[NUMBER_OF_STATS];
2616} __packed;
2617
2618/*
2619 * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
2620 */
2621struct il_scancomplete_notification {
2622 u8 scanned_channels;
2623 u8 status;
2624 u8 last_channel;
2625 __le32 tsf_low;
2626 __le32 tsf_high;
2627} __packed;
2628
2629/******************************************************************************
2630 * (9)
2631 * IBSS/AP Commands and Notifications:
2632 *
2633 *****************************************************************************/
2634
2635enum il_ibss_manager {
2636 IL_NOT_IBSS_MANAGER = 0,
2637 IL_IBSS_MANAGER = 1,
2638};
2639
2640/*
2641 * N_BEACON = 0x90 (notification only, not a command)
2642 */
2643
2644struct il3945_beacon_notif {
2645 struct il3945_tx_resp beacon_notify_hdr;
2646 __le32 low_tsf;
2647 __le32 high_tsf;
2648 __le32 ibss_mgr_status;
2649} __packed;
2650
2651struct il4965_beacon_notif {
2652 struct il4965_tx_resp beacon_notify_hdr;
2653 __le32 low_tsf;
2654 __le32 high_tsf;
2655 __le32 ibss_mgr_status;
2656} __packed;
2657
2658/*
2659 * C_TX_BEACON= 0x91 (command, has simple generic response)
2660 */
2661
2662struct il3945_tx_beacon_cmd {
2663 struct il3945_tx_cmd tx;
2664 __le16 tim_idx;
2665 u8 tim_size;
2666 u8 reserved1;
2667 struct ieee80211_hdr frame[0]; /* beacon frame */
2668} __packed;
2669
2670struct il_tx_beacon_cmd {
2671 struct il_tx_cmd tx;
2672 __le16 tim_idx;
2673 u8 tim_size;
2674 u8 reserved1;
2675 struct ieee80211_hdr frame[0]; /* beacon frame */
2676} __packed;
2677
2678/******************************************************************************
2679 * (10)
2680 * Statistics Commands and Notifications:
2681 *
2682 *****************************************************************************/
2683
2684#define IL_TEMP_CONVERT 260
2685
2686#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
2687#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
2688#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
2689
2690/* Used for passing the number of successes and failures per rate to the driver */
2691struct rate_histogram {
2692 union {
2693 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2694 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2695 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2696 } success;
2697 union {
2698 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2699 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2700 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2701 } failed;
2702} __packed;
2703
2704/* stats command response */
2705
2706struct iwl39_stats_rx_phy {
2707 __le32 ina_cnt;
2708 __le32 fina_cnt;
2709 __le32 plcp_err;
2710 __le32 crc32_err;
2711 __le32 overrun_err;
2712 __le32 early_overrun_err;
2713 __le32 crc32_good;
2714 __le32 false_alarm_cnt;
2715 __le32 fina_sync_err_cnt;
2716 __le32 sfd_timeout;
2717 __le32 fina_timeout;
2718 __le32 unresponded_rts;
2719 __le32 rxe_frame_limit_overrun;
2720 __le32 sent_ack_cnt;
2721 __le32 sent_cts_cnt;
2722} __packed;
2723
2724struct iwl39_stats_rx_non_phy {
2725 __le32 bogus_cts; /* CTS received when not expecting CTS */
2726 __le32 bogus_ack; /* ACK received when not expecting ACK */
2727 __le32 non_bssid_frames; /* number of frames with BSSID that
2728 * doesn't belong to the STA BSSID */
2729 __le32 filtered_frames; /* count frames that were dumped in the
2730 * filtering process */
2731 __le32 non_channel_beacons; /* beacons with our bss id but not on
2732 * our serving channel */
2733} __packed;
2734
2735struct iwl39_stats_rx {
2736 struct iwl39_stats_rx_phy ofdm;
2737 struct iwl39_stats_rx_phy cck;
2738 struct iwl39_stats_rx_non_phy general;
2739} __packed;
2740
2741struct iwl39_stats_tx {
2742 __le32 preamble_cnt;
2743 __le32 rx_detected_cnt;
2744 __le32 bt_prio_defer_cnt;
2745 __le32 bt_prio_kill_cnt;
2746 __le32 few_bytes_cnt;
2747 __le32 cts_timeout;
2748 __le32 ack_timeout;
2749 __le32 expected_ack_cnt;
2750 __le32 actual_ack_cnt;
2751} __packed;
2752
2753struct stats_dbg {
2754 __le32 burst_check;
2755 __le32 burst_count;
2756 __le32 wait_for_silence_timeout_cnt;
2757 __le32 reserved[3];
2758} __packed;
2759
2760struct iwl39_stats_div {
2761 __le32 tx_on_a;
2762 __le32 tx_on_b;
2763 __le32 exec_time;
2764 __le32 probe_time;
2765} __packed;
2766
2767struct iwl39_stats_general {
2768 __le32 temperature;
2769 struct stats_dbg dbg;
2770 __le32 sleep_time;
2771 __le32 slots_out;
2772 __le32 slots_idle;
2773 __le32 ttl_timestamp;
2774 struct iwl39_stats_div div;
2775} __packed;
2776
2777struct stats_rx_phy {
2778 __le32 ina_cnt;
2779 __le32 fina_cnt;
2780 __le32 plcp_err;
2781 __le32 crc32_err;
2782 __le32 overrun_err;
2783 __le32 early_overrun_err;
2784 __le32 crc32_good;
2785 __le32 false_alarm_cnt;
2786 __le32 fina_sync_err_cnt;
2787 __le32 sfd_timeout;
2788 __le32 fina_timeout;
2789 __le32 unresponded_rts;
2790 __le32 rxe_frame_limit_overrun;
2791 __le32 sent_ack_cnt;
2792 __le32 sent_cts_cnt;
2793 __le32 sent_ba_rsp_cnt;
2794 __le32 dsp_self_kill;
2795 __le32 mh_format_err;
2796 __le32 re_acq_main_rssi_sum;
2797 __le32 reserved3;
2798} __packed;
2799
2800struct stats_rx_ht_phy {
2801 __le32 plcp_err;
2802 __le32 overrun_err;
2803 __le32 early_overrun_err;
2804 __le32 crc32_good;
2805 __le32 crc32_err;
2806 __le32 mh_format_err;
2807 __le32 agg_crc32_good;
2808 __le32 agg_mpdu_cnt;
2809 __le32 agg_cnt;
2810 __le32 unsupport_mcs;
2811} __packed;
2812
2813#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
2814
2815struct stats_rx_non_phy {
2816 __le32 bogus_cts; /* CTS received when not expecting CTS */
2817 __le32 bogus_ack; /* ACK received when not expecting ACK */
2818 __le32 non_bssid_frames; /* number of frames with BSSID that
2819 * doesn't belong to the STA BSSID */
2820 __le32 filtered_frames; /* count frames that were dumped in the
2821 * filtering process */
2822 __le32 non_channel_beacons; /* beacons with our bss id but not on
2823 * our serving channel */
2824 __le32 channel_beacons; /* beacons with our bss id and in our
2825 * serving channel */
2826 __le32 num_missed_bcon; /* number of missed beacons */
2827 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
2828 * ADC was in saturation */
2829 __le32 ina_detection_search_time; /* total time (in 0.8us) searched
2830 * for INA */
2831 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
2832 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
2833 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
2834 __le32 interference_data_flag; /* flag for interference data
2835 * availability. 1 when data is
2836 * available. */
2837 __le32 channel_load; /* counts RX Enable time in uSec */
2838 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
2839 * and CCK) counter */
2840 __le32 beacon_rssi_a;
2841 __le32 beacon_rssi_b;
2842 __le32 beacon_rssi_c;
2843 __le32 beacon_energy_a;
2844 __le32 beacon_energy_b;
2845 __le32 beacon_energy_c;
2846} __packed;
2847
2848struct stats_rx {
2849 struct stats_rx_phy ofdm;
2850 struct stats_rx_phy cck;
2851 struct stats_rx_non_phy general;
2852 struct stats_rx_ht_phy ofdm_ht;
2853} __packed;
2854
2855/**
2856 * struct stats_tx_power - current tx power
2857 *
2858 * @ant_a: current tx power on chain a in 1/2 dB step
2859 * @ant_b: current tx power on chain b in 1/2 dB step
2860 * @ant_c: current tx power on chain c in 1/2 dB step
2861 */
2862struct stats_tx_power {
2863 u8 ant_a;
2864 u8 ant_b;
2865 u8 ant_c;
2866 u8 reserved;
2867} __packed;
2868
2869struct stats_tx_non_phy_agg {
2870 __le32 ba_timeout;
2871 __le32 ba_reschedule_frames;
2872 __le32 scd_query_agg_frame_cnt;
2873 __le32 scd_query_no_agg;
2874 __le32 scd_query_agg;
2875 __le32 scd_query_mismatch;
2876 __le32 frame_not_ready;
2877 __le32 underrun;
2878 __le32 bt_prio_kill;
2879 __le32 rx_ba_rsp_cnt;
2880} __packed;
2881
2882struct stats_tx {
2883 __le32 preamble_cnt;
2884 __le32 rx_detected_cnt;
2885 __le32 bt_prio_defer_cnt;
2886 __le32 bt_prio_kill_cnt;
2887 __le32 few_bytes_cnt;
2888 __le32 cts_timeout;
2889 __le32 ack_timeout;
2890 __le32 expected_ack_cnt;
2891 __le32 actual_ack_cnt;
2892 __le32 dump_msdu_cnt;
2893 __le32 burst_abort_next_frame_mismatch_cnt;
2894 __le32 burst_abort_missing_next_frame_cnt;
2895 __le32 cts_timeout_collision;
2896 __le32 ack_or_ba_timeout_collision;
2897 struct stats_tx_non_phy_agg agg;
2898
2899 __le32 reserved1;
2900} __packed;
2901
2902struct stats_div {
2903 __le32 tx_on_a;
2904 __le32 tx_on_b;
2905 __le32 exec_time;
2906 __le32 probe_time;
2907 __le32 reserved1;
2908 __le32 reserved2;
2909} __packed;
2910
2911struct stats_general_common {
2912 __le32 temperature; /* radio temperature */
2913 struct stats_dbg dbg;
2914 __le32 sleep_time;
2915 __le32 slots_out;
2916 __le32 slots_idle;
2917 __le32 ttl_timestamp;
2918 struct stats_div div;
2919 __le32 rx_enable_counter;
2920 /*
2921 * num_of_sos_states:
2922 * count the number of times we have to re-tune
2923 * in order to get out of bad PHY status
2924 */
2925 __le32 num_of_sos_states;
2926} __packed;
2927
2928struct stats_general {
2929 struct stats_general_common common;
2930 __le32 reserved2;
2931 __le32 reserved3;
2932} __packed;
2933
2934#define UCODE_STATS_CLEAR_MSK (0x1 << 0)
2935#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1)
2936#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2)
2937
2938/*
2939 * C_STATS = 0x9c,
2940 * all devices identical.
2941 *
2942 * This command triggers an immediate response containing uCode stats.
2943 * The response is in the same format as N_STATS 0x9d, below.
2944 *
2945 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2946 * internal copy of the stats (counters) after issuing the response.
2947 * This flag does not affect N_STATS notifications after beacons (see below).
2948 *
2949 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2950 * N_STATS notifications after received beacons (see below). This flag
2951 * does not affect the response to the C_STATS 0x9c itself.
2952 */
2953#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2954#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
2955struct il_stats_cmd {
2956 __le32 configuration_flags; /* IL_STATS_CONF_* */
2957} __packed;
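/*
 * A minimal sketch of using the command above: request an immediate stats
 * dump and clear the uCode counters. It assumes the synchronous
 * il_send_cmd_pdu() helper from common.c (so il->mutex must be held);
 * the function name is illustrative only.
 */
static int il_example_request_stats(struct il_priv *il, bool clear)
{
	struct il_stats_cmd cmd = {
		.configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
	};

	/* The response arrives in the same format as N_STATS (0x9d). */
	return il_send_cmd_pdu(il, C_STATS, sizeof(cmd), &cmd);
}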
2958
2959/*
2960 * N_STATS = 0x9d (notification only, not a command)
2961 *
2962 * By default, uCode issues this notification after receiving a beacon
2963 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2964 * C_STATS 0x9c, above.
2965 *
2966 * Statistics counters continue to increment beacon after beacon, but are
2967 * cleared when changing channels or when driver issues C_STATS
2968 * 0x9c with CLEAR_STATS bit set (see above).
2969 *
2970 * uCode also issues this notification during scans. uCode clears stats
2971 * appropriately so that each notification contains stats for only the
2972 * one channel that has just been scanned.
2973 */
2974#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
2975#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
2976
2977struct il3945_notif_stats {
2978 __le32 flag;
2979 struct iwl39_stats_rx rx;
2980 struct iwl39_stats_tx tx;
2981 struct iwl39_stats_general general;
2982} __packed;
2983
2984struct il_notif_stats {
2985 __le32 flag;
2986 struct stats_rx rx;
2987 struct stats_tx tx;
2988 struct stats_general general;
2989} __packed;
2990
2991/*
2992 * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
2993 *
2994 * uCode sends N_MISSED_BEACONS to the driver whenever it detects missed
2995 * beacons, regardless of how many were missed. When the driver receives the
2996 * notification, it can find inside it all of the beacon information: the
2997 * number of total missed beacons, the number of consecutive missed beacons,
2998 * the number of beacons received and the number of beacons expected to be
2999 * received.
3000 *
3001 * If uCode detects consecutive_missed_beacons > 5, it will reset the radio
3002 * in order to bring the radio/PHY back to a working state; this has no
3003 * relation to when the driver will perform sensitivity calibration.
3004 *
3005 * The driver should set its own missed_beacon_threshold to decide when to
3006 * perform sensitivity calibration based on the number of consecutive missed
3007 * beacons, in order to improve overall performance in noisy environments.
3008 *
3009 */
3010
3011#define IL_MISSED_BEACON_THRESHOLD_MIN (1)
3012#define IL_MISSED_BEACON_THRESHOLD_DEF (5)
3013#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF
3014
3015struct il_missed_beacon_notif {
3016 __le32 consecutive_missed_beacons;
3017 __le32 total_missed_becons;
3018 __le32 num_expected_beacons;
3019 __le32 num_recvd_beacons;
3020} __packed;
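/*
 * A minimal sketch of handling the notification above, assuming the
 * rxb_addr() accessor used elsewhere in common.c; a real handler would
 * typically also schedule sensitivity recalibration work. The function
 * name is illustrative only.
 */
static void il_example_hdl_missed_beacon(struct il_priv *il,
					 struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_missed_beacon_notif *mb = &pkt->u.missed_beacon;

	if (le32_to_cpu(mb->consecutive_missed_beacons) >=
	    IL_MISSED_BEACON_THRESHOLD_DEF) {
		/* too many in a row: kick off sensitivity recalibration */
	}
}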
3021
3022/******************************************************************************
3023 * (11)
3024 * Rx Calibration Commands:
3025 *
3026 * With the uCode used for open source drivers, most Tx calibration (except
3027 * for Tx Power) and most Rx calibration is done by uCode during the
3028 * "initialize" phase of uCode boot. Driver must calibrate only:
3029 *
3030 * 1) Tx power (depends on temperature), described elsewhere
3031 * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
3032 * 3) Receiver sensitivity (to optimize signal detection)
3033 *
3034 *****************************************************************************/
3035
3036/**
3037 * C_SENSITIVITY = 0xa8 (command, has simple generic response)
3038 *
3039 * This command sets up the Rx signal detector for a sensitivity level that
3040 * is high enough to lock onto all signals within the associated network,
3041 * but low enough to ignore signals that are below a certain threshold, so as
3042 * not to have too many "false alarms". False alarms are signals that the
3043 * Rx DSP tries to lock onto, but then discards after determining that they
3044 * are noise.
3045 *
3046 * The optimum number of false alarms is between 5 and 50 per 200 TUs
3047 * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
3048 * time listening, not transmitting). Driver must adjust sensitivity so that
3049 * the ratio of actual false alarms to actual Rx time falls within this range.
3050 *
3051 * While associated, uCode delivers N_STATS notifications after each
3052 * received beacon. These provide information to the driver to analyze the
3053 * sensitivity. Don't analyze stats that come in from scanning, or any
3054 * other non-associated-network source. Pertinent stats include:
3055 *
3056 * From "general" stats (struct stats_rx_non_phy):
3057 *
3058 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
3059 * Measure of energy of desired signal. Used for establishing a level
3060 * below which the device does not detect signals.
3061 *
3062 * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
3063 * Measure of background noise in silent period after beacon.
3064 *
3065 * channel_load
3066 * uSecs of actual Rx time during beacon period (varies according to
3067 * how much time was spent transmitting).
3068 *
3069 * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
3070 *
3071 * false_alarm_cnt
3072 * Signal locks abandoned early (before phy-level header).
3073 *
3074 * plcp_err
3075 * Signal locks abandoned late (during phy-level header).
3076 *
3077 * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
3078 * beacon to beacon, i.e. each value is an accumulation of all errors
3079 * before and including the latest beacon. Values will wrap around to 0
3080 * after counting up to 2^32 - 1. Driver must differentiate vs.
3081 * previous beacon's values to determine # false alarms in the current
3082 * beacon period.
3083 *
3084 * Total number of false alarms = false_alarms + plcp_errs
3085 *
3086 * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
3087 * (notice that the start points for OFDM are at or close to settings for
3088 * maximum sensitivity):
3089 *
3090 * START / MIN / MAX
3091 * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120
3092 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210
3093 * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140
3094 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270
3095 *
3096 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
3097 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
3098 * by *adding* 1 to all 4 of the table entries above, up to the max for
3099 * each entry. Conversely, if false alarm rate is too low (less than 5
3100 * for each 204.8 msecs listening), *subtract* 1 from each entry to
3101 * increase sensitivity.
3102 *
3103 * For CCK sensitivity, keep track of the following:
3104 *
3105 * 1). 20-beacon history of maximum background noise, indicated by
3106 * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
3107 * 3 receivers. For any given beacon, the "silence reference" is
3108 * the maximum of last 60 samples (20 beacons * 3 receivers).
3109 *
3110 * 2). 10-beacon history of strongest signal level, as indicated
3111 * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
3112 * i.e. the strength of the signal through the best receiver at the
3113 * moment. These measurements are "upside down", with lower values
3114 * for stronger signals, so max energy will be *minimum* value.
3115 *
3116 * Then for any given beacon, the driver must determine the *weakest*
3117 * of the strongest signals; this is the minimum level that needs to be
3118 * successfully detected, when using the best receiver at the moment.
3119 * "Max cck energy" is the maximum (higher value means lower energy!)
3120 * of the last 10 minima. Once this is determined, driver must add
3121 * a little margin by adding "6" to it.
3122 *
3123 * 3). Number of consecutive beacon periods with too few false alarms.
3124 * Reset this to 0 at the first beacon period that falls within the
3125 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
3126 *
3127 * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
3128 * (notice that the start points for CCK are at maximum sensitivity):
3129 *
3130 * START / MIN / MAX
3131 * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200
3132 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400
3133 * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100
3134 *
3135 * If actual rate of CCK false alarms (+ plcp_errors) is too high
3136 * (greater than 50 for each 204.8 msecs listening), method for reducing
3137 * sensitivity is:
3138 *
3139 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
3140 * up to max 400.
3141 *
3142 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
3143 * sensitivity has been reduced a significant amount; bring it up to
3144 * a moderate 161. Otherwise, *add* 3, up to max 200.
3145 *
3146 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
3147 * sensitivity has been reduced only a moderate or small amount;
3148 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
3149 * down to min 0. Otherwise (if gain has been significantly reduced),
3150 * don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
3151 *
3152 * b) Save a snapshot of the "silence reference".
3153 *
3154 * If actual rate of CCK false alarms (+ plcp_errors) is too low
3155 * (less than 5 for each 204.8 msecs listening), method for increasing
3156 * sensitivity is used only if:
3157 *
3158 * 1a) Previous beacon did not have too many false alarms
3159 * 1b) AND difference between previous "silence reference" and current
3160 * "silence reference" (prev - current) is 2 or more,
3161 * OR 2) 100 or more consecutive beacon periods have had rate of
3162 * less than 5 false alarms per 204.8 milliseconds rx time.
3163 *
3164 * Method for increasing sensitivity:
3165 *
3166 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
3167 * down to min 125.
3168 *
3169 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
3170 * down to min 200.
3171 *
3172 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
3173 *
3174 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3175 * (between 5 and 50 for each 204.8 msecs listening):
3176 *
3177 * 1) Save a snapshot of the silence reference.
3178 *
3179 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3180 * give some extra margin to energy threshold by *subtracting* 8
3181 * from value in HD_MIN_ENERGY_CCK_DET_IDX.
3182 *
3183 * For all cases (too few, too many, good range), make sure that the CCK
3184 * detection threshold (energy) is below the energy level for robust
3185 * detection over the past 10 beacon periods, the "Max cck energy".
3186 * Lower values mean higher energy; this means making sure that the value
3187 * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
3188 *
3189 */
3190
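/*
 * A minimal sketch of the per-beacon false-alarm bookkeeping described
 * above: the uCode counters are cumulative and wrap at 2^32, so the delta
 * is taken with unsigned 32-bit arithmetic, which handles the wrap
 * naturally. Parameter names are illustrative only.
 */
static u32 il_example_false_alarms_this_beacon(const struct stats_rx_phy *cur,
					       u32 prev_false_alarm_cnt,
					       u32 prev_plcp_err)
{
	u32 fa = le32_to_cpu(cur->false_alarm_cnt) - prev_false_alarm_cnt;
	u32 plcp = le32_to_cpu(cur->plcp_err) - prev_plcp_err;

	/* Total number of false alarms = false_alarms + plcp_errs */
	return fa + plcp;
}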
3191/*
3192 * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
3193 */
3194#define HD_TBL_SIZE (11) /* number of entries */
3195#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */
3196#define HD_MIN_ENERGY_OFDM_DET_IDX (1)
3197#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2)
3198#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3)
3199#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4)
3200#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5)
3201#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6)
3202#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7)
3203#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8)
3204#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9)
3205#define HD_OFDM_ENERGY_TH_IN_IDX (10)
3206
3207/* Control field in struct il_sensitivity_cmd */
3208#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0)
3209#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1)
3210
3211/**
3212 * struct il_sensitivity_cmd
3213 * @control: (1) updates working table, (0) updates default table
3214 * @table: energy threshold values, use HD_* as idx into table
3215 *
3216 * Always use "1" in "control" to update uCode's working table and DSP.
3217 */
3218struct il_sensitivity_cmd {
3219 __le16 control; /* always use "1" */
3220 __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */
3221} __packed;
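/*
 * A minimal sketch of issuing the command above with an updated working
 * table, assuming threshold values (new_tbl[]) computed along the lines
 * described earlier and the synchronous il_send_cmd_pdu() helper from
 * common.c. The function name is illustrative only.
 */
static int il_example_send_sensitivity(struct il_priv *il,
				       const u16 new_tbl[HD_TBL_SIZE])
{
	struct il_sensitivity_cmd cmd = {
		.control = C_SENSITIVITY_CONTROL_WORK_TBL,
	};
	int i;

	for (i = 0; i < HD_TBL_SIZE; i++)
		cmd.table[i] = cpu_to_le16(new_tbl[i]);

	return il_send_cmd_pdu(il, C_SENSITIVITY, sizeof(cmd), &cmd);
}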
3222
3223/**
3224 * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
3225 *
3226 * This command sets the relative gains of the 4965 device's 3 radio receiver chains.
3227 *
3228 * After the first association, driver should accumulate signal and noise
3229 * stats from the N_STATS notifications that follow the first 20
3230 * beacons from the associated network (don't collect stats that come
3231 * in from scanning, or any other non-network source).
3232 *
3233 * DISCONNECTED ANTENNA:
3234 *
3235 * Driver should determine which antennas are actually connected, by comparing
3236 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3237 * following values over 20 beacons, one accumulator for each of the chains
3238 * a/b/c, from struct stats_rx_non_phy:
3239 *
3240 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3241 *
3242 * Find the strongest signal from among a/b/c. Compare the other two to the
3243 * strongest. If any signal is more than 15 dB (times 20, unless you
3244 * divide the accumulated values by 20) below the strongest, the driver
3245 * considers that antenna to be disconnected, and should not try to use that
3246 * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
3247 * driver should declare the stronger one as connected, and attempt to use it
3248 * (A and B are the only 2 Tx chains!).
3249 *
3250 *
3251 * RX BALANCE:
3252 *
3253 * Driver should balance the 3 receivers (but just the ones that are connected
3254 * to antennas, see above) for gain, by comparing the average signal levels
3255 * detected during the silence after each beacon (background noise).
3256 * Accumulate (add) the following values over 20 beacons, one accumulator for
3257 * each of the chains a/b/c, from struct stats_rx_non_phy:
3258 *
3259 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3260 *
3261 * Find the weakest background noise level from among a/b/c. This Rx chain
3262 * will be the reference, with 0 gain adjustment. Attenuate the other chains by
3263 * finding noise difference:
3264 *
3265 * (accum_noise[i] - accum_noise[reference]) / 30
3266 *
3267 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3268 * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
3269 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3270 * and set bit 2 to indicate "reduce gain". The value for the reference
3271 * (weakest) chain should be "0".
3272 *
3273 * diff_gain_[abc] bit fields:
3274 * 2: (1) reduce gain, (0) increase gain
3275 * 1-0: amount of gain, units of 1.5 dB
3276 */
3277
3278/* Phy calibration command for series */
3279/* The default calibrate table size if not specified by firmware */
3280#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3281enum {
3282 IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3283 IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3284};
3285
3286#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3287
3288struct il_calib_hdr {
3289 u8 op_code;
3290 u8 first_group;
3291 u8 groups_num;
3292 u8 data_valid;
3293} __packed;
3294
3295/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
3296struct il_calib_diff_gain_cmd {
3297 struct il_calib_hdr hdr;
3298 s8 diff_gain_a; /* see above */
3299 s8 diff_gain_b;
3300 s8 diff_gain_c;
3301 u8 reserved1;
3302} __packed;
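/*
 * A minimal sketch of the RX-balance computation described above: the
 * difference of the 20-beacon noise accumulators, scaled to 1.5 dB steps,
 * clamped to the 0-3 range, with bit 2 set to mean "reduce gain". The
 * reference (weakest-noise) chain keeps a value of 0. Names are
 * illustrative only.
 */
static s8 il_example_diff_gain(u32 accum_noise_chain, u32 accum_noise_ref)
{
	u32 delta = (accum_noise_chain - accum_noise_ref) / 30;

	if (delta > 3)
		delta = 3;

	return delta ? (s8)(delta | (1 << 2)) : 0;
}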
3303
3304/******************************************************************************
3305 * (12)
3306 * Miscellaneous Commands:
3307 *
3308 *****************************************************************************/
3309
3310/*
3311 * LEDs Command & Response
3312 * C_LEDS = 0x48 (command, has simple generic response)
3313 *
3314 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3315 * this command turns it on or off, or sets up a periodic blinking cycle.
3316 */
3317struct il_led_cmd {
3318 __le32 interval; /* "interval" in uSec */
3319 u8 id; /* 1: Activity, 2: Link, 3: Tech */
3320 u8 off; /* # intervals off while blinking;
3321 * "0", with >0 "on" value, turns LED on */
3322 u8 on; /* # intervals on while blinking;
3323 * "0", regardless of "off", turns LED off */
3324 u8 reserved;
3325} __packed;
3326
3327/******************************************************************************
3328 * (13)
3329 * Union of all expected notifications/responses:
3330 *
3331 *****************************************************************************/
3332
3333#define IL_RX_FRAME_SIZE_MSK 0x00003fff
3334
3335struct il_rx_pkt {
3336 /*
3337 * The first 4 bytes of the RX frame header contain both the RX frame
3338 * size and some flags.
3339 * Bit fields:
3340 * 31: flag flush RB request
3341 * 30: flag ignore TC (terminal counter) request
3342 * 29: flag fast IRQ request
3343 * 28-14: Reserved
3344 * 13-00: RX frame size
3345 */
3346 __le32 len_n_flags;
3347 struct il_cmd_header hdr;
3348 union {
3349 struct il3945_rx_frame rx_frame;
3350 struct il3945_tx_resp tx_resp;
3351 struct il3945_beacon_notif beacon_status;
3352
3353 struct il_alive_resp alive_frame;
3354 struct il_spectrum_notification spectrum_notif;
3355 struct il_csa_notification csa_notif;
3356 struct il_error_resp err_resp;
3357 struct il_card_state_notif card_state_notif;
3358 struct il_add_sta_resp add_sta;
3359 struct il_rem_sta_resp rem_sta;
3360 struct il_sleep_notification sleep_notif;
3361 struct il_spectrum_resp spectrum;
3362 struct il_notif_stats stats;
3363 struct il_compressed_ba_resp compressed_ba;
3364 struct il_missed_beacon_notif missed_beacon;
3365 __le32 status;
3366 u8 raw[0];
3367 } u;
3368} __packed;
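/*
 * A minimal sketch of extracting the RX frame length from len_n_flags
 * using the mask above; the upper bits carry the flush/TC/fast-IRQ flags.
 */
static inline u32 il_example_rx_pkt_len(const struct il_rx_pkt *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
}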
3369
3370#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
new file mode 100644
index 000000000000..887114582583
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -0,0 +1,5586 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/types.h>
35#include <linux/lockdep.h>
36#include <linux/pci.h>
37#include <linux/dma-mapping.h>
38#include <linux/delay.h>
39#include <linux/skbuff.h>
40#include <net/mac80211.h>
41
42#include "common.h"
43
44int
45_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
46{
47 const int interval = 10; /* microseconds */
48 int t = 0;
49
50 do {
51 if ((_il_rd(il, addr) & mask) == (bits & mask))
52 return t;
53 udelay(interval);
54 t += interval;
55 } while (t < timeout);
56
57 return -ETIMEDOUT;
58}
59EXPORT_SYMBOL(_il_poll_bit);
60
61void
62il_set_bit(struct il_priv *p, u32 r, u32 m)
63{
64 unsigned long reg_flags;
65
66 spin_lock_irqsave(&p->reg_lock, reg_flags);
67 _il_set_bit(p, r, m);
68 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
69}
70EXPORT_SYMBOL(il_set_bit);
71
72void
73il_clear_bit(struct il_priv *p, u32 r, u32 m)
74{
75 unsigned long reg_flags;
76
77 spin_lock_irqsave(&p->reg_lock, reg_flags);
78 _il_clear_bit(p, r, m);
79 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
80}
81EXPORT_SYMBOL(il_clear_bit);
82
83bool
84_il_grab_nic_access(struct il_priv *il)
85{
86 int ret;
87 u32 val;
88
89 /* this bit wakes up the NIC */
90 _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
91
92 /*
93 * These bits say the device is running, and should keep running for
94 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
95 * but they do not indicate that embedded SRAM is restored yet;
96 * 3945 and 4965 have volatile SRAM, and must save/restore contents
97 * to/from host DRAM when sleeping/waking for power-saving.
98 * Each direction takes approximately 1/4 millisecond; with this
99 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
100 * series of register accesses are expected (e.g. reading Event Log),
101 * to keep device from sleeping.
102 *
103 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
104 * SRAM is okay/restored. We don't check that here because this call
105 * is just for hardware register access; but GP1 MAC_SLEEP check is a
106 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
107 *
108 */
109 ret =
110 _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
111 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
112 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
113 if (unlikely(ret < 0)) {
114 val = _il_rd(il, CSR_GP_CNTRL);
115 WARN_ONCE(1, "Timeout waiting for ucode processor access "
116 "(CSR_GP_CNTRL 0x%08x)\n", val);
117 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
118 return false;
119 }
120
121 return true;
122}
123EXPORT_SYMBOL_GPL(_il_grab_nic_access);
124
125int
126il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
127{
128 const int interval = 10; /* microseconds */
129 int t = 0;
130
131 do {
132 if ((il_rd(il, addr) & mask) == mask)
133 return t;
134 udelay(interval);
135 t += interval;
136 } while (t < timeout);
137
138 return -ETIMEDOUT;
139}
140EXPORT_SYMBOL(il_poll_bit);
141
142u32
143il_rd_prph(struct il_priv *il, u32 reg)
144{
145 unsigned long reg_flags;
146 u32 val;
147
148 spin_lock_irqsave(&il->reg_lock, reg_flags);
149 _il_grab_nic_access(il);
150 val = _il_rd_prph(il, reg);
151 _il_release_nic_access(il);
152 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
153 return val;
154}
155EXPORT_SYMBOL(il_rd_prph);
156
157void
158il_wr_prph(struct il_priv *il, u32 addr, u32 val)
159{
160 unsigned long reg_flags;
161
162 spin_lock_irqsave(&il->reg_lock, reg_flags);
163 if (likely(_il_grab_nic_access(il))) {
164 _il_wr_prph(il, addr, val);
165 _il_release_nic_access(il);
166 }
167 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
168}
169EXPORT_SYMBOL(il_wr_prph);
170
171u32
172il_read_targ_mem(struct il_priv *il, u32 addr)
173{
174 unsigned long reg_flags;
175 u32 value;
176
177 spin_lock_irqsave(&il->reg_lock, reg_flags);
178 _il_grab_nic_access(il);
179
180 _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
181 value = _il_rd(il, HBUS_TARG_MEM_RDAT);
182
183 _il_release_nic_access(il);
184 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
185 return value;
186}
187EXPORT_SYMBOL(il_read_targ_mem);
188
189void
190il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
191{
192 unsigned long reg_flags;
193
194 spin_lock_irqsave(&il->reg_lock, reg_flags);
195 if (likely(_il_grab_nic_access(il))) {
196 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
197 _il_wr(il, HBUS_TARG_MEM_WDAT, val);
198 _il_release_nic_access(il);
199 }
200 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
201}
202EXPORT_SYMBOL(il_write_targ_mem);
203
204const char *
205il_get_cmd_string(u8 cmd)
206{
207 switch (cmd) {
208 IL_CMD(N_ALIVE);
209 IL_CMD(N_ERROR);
210 IL_CMD(C_RXON);
211 IL_CMD(C_RXON_ASSOC);
212 IL_CMD(C_QOS_PARAM);
213 IL_CMD(C_RXON_TIMING);
214 IL_CMD(C_ADD_STA);
215 IL_CMD(C_REM_STA);
216 IL_CMD(C_WEPKEY);
217 IL_CMD(N_3945_RX);
218 IL_CMD(C_TX);
219 IL_CMD(C_RATE_SCALE);
220 IL_CMD(C_LEDS);
221 IL_CMD(C_TX_LINK_QUALITY_CMD);
222 IL_CMD(C_CHANNEL_SWITCH);
223 IL_CMD(N_CHANNEL_SWITCH);
224 IL_CMD(C_SPECTRUM_MEASUREMENT);
225 IL_CMD(N_SPECTRUM_MEASUREMENT);
226 IL_CMD(C_POWER_TBL);
227 IL_CMD(N_PM_SLEEP);
228 IL_CMD(N_PM_DEBUG_STATS);
229 IL_CMD(C_SCAN);
230 IL_CMD(C_SCAN_ABORT);
231 IL_CMD(N_SCAN_START);
232 IL_CMD(N_SCAN_RESULTS);
233 IL_CMD(N_SCAN_COMPLETE);
234 IL_CMD(N_BEACON);
235 IL_CMD(C_TX_BEACON);
236 IL_CMD(C_TX_PWR_TBL);
237 IL_CMD(C_BT_CONFIG);
238 IL_CMD(C_STATS);
239 IL_CMD(N_STATS);
240 IL_CMD(N_CARD_STATE);
241 IL_CMD(N_MISSED_BEACONS);
242 IL_CMD(C_CT_KILL_CONFIG);
243 IL_CMD(C_SENSITIVITY);
244 IL_CMD(C_PHY_CALIBRATION);
245 IL_CMD(N_RX_PHY);
246 IL_CMD(N_RX_MPDU);
247 IL_CMD(N_RX);
248 IL_CMD(N_COMPRESSED_BA);
249 default:
250 return "UNKNOWN";
251
252 }
253}
254EXPORT_SYMBOL(il_get_cmd_string);
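/*
 * IL_CMD() is defined in common.h and is not part of this diff; the switch
 * above relies on it expanding each entry to a case label that returns the
 * stringified command name, presumably along the lines of:
 *
 *	#define IL_CMD(x) case x: return #x
 */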
255
256#define HOST_COMPLETE_TIMEOUT (HZ / 2)
257
258static void
259il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
260 struct il_rx_pkt *pkt)
261{
262 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
263 IL_ERR("Bad return from %s (0x%08X)\n",
264 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
265 return;
266 }
267#ifdef CONFIG_IWLEGACY_DEBUG
268 switch (cmd->hdr.cmd) {
269 case C_TX_LINK_QUALITY_CMD:
270 case C_SENSITIVITY:
271 D_HC_DUMP("back from %s (0x%08X)\n",
272 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
273 break;
274 default:
275 D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
276 pkt->hdr.flags);
277 }
278#endif
279}
280
281static int
282il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
283{
284 int ret;
285
286 BUG_ON(!(cmd->flags & CMD_ASYNC));
287
288 /* An asynchronous command can not expect an SKB to be set. */
289 BUG_ON(cmd->flags & CMD_WANT_SKB);
290
291 /* Assign a generic callback if one is not provided */
292 if (!cmd->callback)
293 cmd->callback = il_generic_cmd_callback;
294
295 if (test_bit(S_EXIT_PENDING, &il->status))
296 return -EBUSY;
297
298 ret = il_enqueue_hcmd(il, cmd);
299 if (ret < 0) {
300 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
301 il_get_cmd_string(cmd->id), ret);
302 return ret;
303 }
304 return 0;
305}
306
307int
308il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
309{
310 int cmd_idx;
311 int ret;
312
313 lockdep_assert_held(&il->mutex);
314
315 BUG_ON(cmd->flags & CMD_ASYNC);
316
317 /* A synchronous command can not have a callback set. */
318 BUG_ON(cmd->callback);
319
320 D_INFO("Attempting to send sync command %s\n",
321 il_get_cmd_string(cmd->id));
322
323 set_bit(S_HCMD_ACTIVE, &il->status);
324 D_INFO("Setting HCMD_ACTIVE for command %s\n",
325 il_get_cmd_string(cmd->id));
326
327 cmd_idx = il_enqueue_hcmd(il, cmd);
328 if (cmd_idx < 0) {
329 ret = cmd_idx;
330 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
331 il_get_cmd_string(cmd->id), ret);
332 goto out;
333 }
334
335 ret = wait_event_timeout(il->wait_command_queue,
336 !test_bit(S_HCMD_ACTIVE, &il->status),
337 HOST_COMPLETE_TIMEOUT);
338 if (!ret) {
339 if (test_bit(S_HCMD_ACTIVE, &il->status)) {
340 IL_ERR("Error sending %s: time out after %dms.\n",
341 il_get_cmd_string(cmd->id),
342 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
343
344 clear_bit(S_HCMD_ACTIVE, &il->status);
345 D_INFO("Clearing HCMD_ACTIVE for command %s\n",
346 il_get_cmd_string(cmd->id));
347 ret = -ETIMEDOUT;
348 goto cancel;
349 }
350 }
351
352 if (test_bit(S_RFKILL, &il->status)) {
353 IL_ERR("Command %s aborted: RF KILL Switch\n",
354 il_get_cmd_string(cmd->id));
355 ret = -ECANCELED;
356 goto fail;
357 }
358 if (test_bit(S_FW_ERROR, &il->status)) {
359 IL_ERR("Command %s failed: FW Error\n",
360 il_get_cmd_string(cmd->id));
361 ret = -EIO;
362 goto fail;
363 }
364 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
365 IL_ERR("Error: Response NULL in '%s'\n",
366 il_get_cmd_string(cmd->id));
367 ret = -EIO;
368 goto cancel;
369 }
370
371 ret = 0;
372 goto out;
373
374cancel:
375 if (cmd->flags & CMD_WANT_SKB) {
376 /*
377 * Cancel the CMD_WANT_SKB flag for the cmd in the
378 * TX cmd queue. Otherwise in case the cmd comes
379 * in later, it will possibly set an invalid
380 * address (cmd->meta.source).
381 */
382 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
383 }
384fail:
385 if (cmd->reply_page) {
386 il_free_pages(il, cmd->reply_page);
387 cmd->reply_page = 0;
388 }
389out:
390 return ret;
391}
392EXPORT_SYMBOL(il_send_cmd_sync);
393
394int
395il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
396{
397 if (cmd->flags & CMD_ASYNC)
398 return il_send_cmd_async(il, cmd);
399
400 return il_send_cmd_sync(il, cmd);
401}
402EXPORT_SYMBOL(il_send_cmd);
403
404int
405il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
406{
407 struct il_host_cmd cmd = {
408 .id = id,
409 .len = len,
410 .data = data,
411 };
412
413 return il_send_cmd_sync(il, &cmd);
414}
415EXPORT_SYMBOL(il_send_cmd_pdu);
416
417int
418il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
419 void (*callback) (struct il_priv *il,
420 struct il_device_cmd *cmd,
421 struct il_rx_pkt *pkt))
422{
423 struct il_host_cmd cmd = {
424 .id = id,
425 .len = len,
426 .data = data,
427 };
428
429 cmd.flags |= CMD_ASYNC;
430 cmd.callback = callback;
431
432 return il_send_cmd_async(il, &cmd);
433}
434EXPORT_SYMBOL(il_send_cmd_pdu_async);
435
436/* default: IL_LED_BLINK(0) using blinking idx table */
437static int led_mode;
438module_param(led_mode, int, S_IRUGO);
439MODULE_PARM_DESC(led_mode,
440 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
441
442/* Throughput OFF time(ms) ON time (ms)
443 * >300 25 25
444 * >200 to 300 40 40
445 * >100 to 200 55 55
446 * >70 to 100 65 65
447 * >50 to 70 75 75
448 * >20 to 50 85 85
449 * >10 to 20 95 95
450 * >5 to 10 110 110
451 * >1 to 5 130 130
452 * >0 to 1 167 167
453 * <=0 SOLID ON
454 */
455static const struct ieee80211_tpt_blink il_blink[] = {
456 {.throughput = 0, .blink_time = 334},
457 {.throughput = 1 * 1024 - 1, .blink_time = 260},
458 {.throughput = 5 * 1024 - 1, .blink_time = 220},
459 {.throughput = 10 * 1024 - 1, .blink_time = 190},
460 {.throughput = 20 * 1024 - 1, .blink_time = 170},
461 {.throughput = 50 * 1024 - 1, .blink_time = 150},
462 {.throughput = 70 * 1024 - 1, .blink_time = 130},
463 {.throughput = 100 * 1024 - 1, .blink_time = 110},
464 {.throughput = 200 * 1024 - 1, .blink_time = 80},
465 {.throughput = 300 * 1024 - 1, .blink_time = 50},
466};
467
468/*
469 * Adjust the led blink rate to compensate for the MAC clock deviation of
470 * each HW. Led blink rate analysis showed an average deviation of 0% on
471 * 3945 and 5% on 4965 HW.
472 * The led on/off time must be compensated per HW according to its deviation
473 * to achieve the desired led frequency.
474 * The calculation is: (100-averageDeviation)/100 * blinkTime
475 * For code efficiency the calculation will be:
476 * compensation = (100 - averageDeviation) * 64 / 100
477 * NewBlinkTime = (compensation * BlinkTime) / 64
478 */
479static inline u8
480il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
481{
482 if (!compensation) {
483 IL_ERR("undefined blink compensation: "
484 "use pre-defined blinking time\n");
485 return time;
486 }
487
488 return (u8) ((time * compensation) >> 6);
489}
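/*
 * Worked example of the compensation formula above (illustrative numbers):
 * with the ~5% average deviation measured on 4965 HW,
 * compensation = (100 - 5) * 64 / 100 = 60, so a requested 110 ms blink
 * interval becomes (110 * 60) >> 6 = 103 ms.
 */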
490
491/* Set led pattern command */
492static int
493il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
494{
495 struct il_led_cmd led_cmd = {
496 .id = IL_LED_LINK,
497 .interval = IL_DEF_LED_INTRVL
498 };
499 int ret;
500
501 if (!test_bit(S_READY, &il->status))
502 return -EBUSY;
503
504 if (il->blink_on == on && il->blink_off == off)
505 return 0;
506
507 if (off == 0) {
508 /* led is SOLID_ON */
509 on = IL_LED_SOLID;
510 }
511
512 D_LED("Led blink time compensation=%u\n",
513 il->cfg->led_compensation);
514 led_cmd.on =
515 il_blink_compensation(il, on,
516 il->cfg->led_compensation);
517 led_cmd.off =
518 il_blink_compensation(il, off,
519 il->cfg->led_compensation);
520
521 ret = il->ops->send_led_cmd(il, &led_cmd);
522 if (!ret) {
523 il->blink_on = on;
524 il->blink_off = off;
525 }
526 return ret;
527}
528
529static void
530il_led_brightness_set(struct led_classdev *led_cdev,
531 enum led_brightness brightness)
532{
533 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
534 unsigned long on = 0;
535
536 if (brightness > 0)
537 on = IL_LED_SOLID;
538
539 il_led_cmd(il, on, 0);
540}
541
542static int
543il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
544 unsigned long *delay_off)
545{
546 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
547
548 return il_led_cmd(il, *delay_on, *delay_off);
549}
550
551void
552il_leds_init(struct il_priv *il)
553{
554 int mode = led_mode;
555 int ret;
556
557 if (mode == IL_LED_DEFAULT)
558 mode = il->cfg->led_mode;
559
560 il->led.name =
561 kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
562 il->led.brightness_set = il_led_brightness_set;
563 il->led.blink_set = il_led_blink_set;
564 il->led.max_brightness = 1;
565
566 switch (mode) {
567 case IL_LED_DEFAULT:
568 WARN_ON(1);
569 break;
570 case IL_LED_BLINK:
571 il->led.default_trigger =
572 ieee80211_create_tpt_led_trigger(il->hw,
573 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
574 il_blink,
575 ARRAY_SIZE(il_blink));
576 break;
577 case IL_LED_RF_STATE:
578 il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
579 break;
580 }
581
582 ret = led_classdev_register(&il->pci_dev->dev, &il->led);
583 if (ret) {
584 kfree(il->led.name);
585 return;
586 }
587
588 il->led_registered = true;
589}
590EXPORT_SYMBOL(il_leds_init);
591
592void
593il_leds_exit(struct il_priv *il)
594{
595 if (!il->led_registered)
596 return;
597
598 led_classdev_unregister(&il->led);
599 kfree(il->led.name);
600}
601EXPORT_SYMBOL(il_leds_exit);
602
603/************************** EEPROM BANDS ****************************
604 *
605 * The il_eeprom_band definitions below provide the mapping from the
606 * EEPROM contents to the specific channel number supported for each
607 * band.
608 *
609 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
610 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
611 * The specific geography and calibration information for that channel
612 * is contained in the eeprom map itself.
613 *
614 * During init, we copy the eeprom information and channel map
615 * information into il->channel_info_24/52 and il->channel_map_24/52
616 *
617 * channel_map_24/52 provides the idx in the channel_info array for a
618 * given channel. We have to have two separate maps as there is channel
619 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
620 * band_2
621 *
622 * A value of 0xff stored in the channel_map indicates that the channel
623 * is not supported by the hardware at all.
624 *
625 * A value of 0xfe in the channel_map indicates that the channel is not
626 * valid for Tx with the current hardware. This means that
627 * while the system can tune and receive on a given channel, it may not
628 * be able to associate or transmit any frames on that
629 * channel. There is no corresponding channel information for that
630 * entry.
631 *
632 *********************************************************************/
633
634/* 2.4 GHz */
635const u8 il_eeprom_band_1[14] = {
636 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
637};
638
639/* 5.2 GHz bands */
640static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */
641 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
642};
643
644static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */
645 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
646};
647
648static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */
649 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
650};
651
652static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */
653 145, 149, 153, 157, 161, 165
654};
655
656static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */
657 1, 2, 3, 4, 5, 6, 7
658};
659
660static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */
661 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
662};
663
664/******************************************************************************
665 *
666 * EEPROM related functions
667 *
668******************************************************************************/
669
670static int
671il_eeprom_verify_signature(struct il_priv *il)
672{
673 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
674 int ret = 0;
675
676 D_EEPROM("EEPROM signature=0x%08x\n", gp);
677 switch (gp) {
678 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
679 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
680 break;
681 default:
682 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
683 ret = -ENOENT;
684 break;
685 }
686 return ret;
687}
688
689const u8 *
690il_eeprom_query_addr(const struct il_priv *il, size_t offset)
691{
692 BUG_ON(offset >= il->cfg->eeprom_size);
693 return &il->eeprom[offset];
694}
695EXPORT_SYMBOL(il_eeprom_query_addr);
696
697u16
698il_eeprom_query16(const struct il_priv *il, size_t offset)
699{
700 if (!il->eeprom)
701 return 0;
702 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
703}
704EXPORT_SYMBOL(il_eeprom_query16);
705
706/**
707 * il_eeprom_init - read EEPROM contents
708 *
709 * Load the EEPROM contents from adapter into il->eeprom
710 *
711 * NOTE: This routine uses the non-debug IO access functions.
712 */
713int
714il_eeprom_init(struct il_priv *il)
715{
716 __le16 *e;
717 u32 gp = _il_rd(il, CSR_EEPROM_GP);
718 int sz;
719 int ret;
720 u16 addr;
721
722 /* allocate eeprom */
723 sz = il->cfg->eeprom_size;
724 D_EEPROM("NVM size = %d\n", sz);
725 il->eeprom = kzalloc(sz, GFP_KERNEL);
726 if (!il->eeprom) {
727 ret = -ENOMEM;
728 goto alloc_err;
729 }
730 e = (__le16 *) il->eeprom;
731
732 il->ops->apm_init(il);
733
734 ret = il_eeprom_verify_signature(il);
735 if (ret < 0) {
736 IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
737 ret = -ENOENT;
738 goto err;
739 }
740
741 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
742 ret = il->ops->eeprom_acquire_semaphore(il);
743 if (ret < 0) {
744 IL_ERR("Failed to acquire EEPROM semaphore.\n");
745 ret = -ENOENT;
746 goto err;
747 }
748
749 /* eeprom is an array of 16bit values */
750 for (addr = 0; addr < sz; addr += sizeof(u16)) {
751 u32 r;
752
753 _il_wr(il, CSR_EEPROM_REG,
754 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
755
756 ret =
757 _il_poll_bit(il, CSR_EEPROM_REG,
758 CSR_EEPROM_REG_READ_VALID_MSK,
759 CSR_EEPROM_REG_READ_VALID_MSK,
760 IL_EEPROM_ACCESS_TIMEOUT);
761 if (ret < 0) {
762 IL_ERR("Time out reading EEPROM[%d]\n", addr);
763 goto done;
764 }
765 r = _il_rd(il, CSR_EEPROM_REG);
766 e[addr / 2] = cpu_to_le16(r >> 16);
767 }
768
769 D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
770 il_eeprom_query16(il, EEPROM_VERSION));
771
772 ret = 0;
773done:
774 il->ops->eeprom_release_semaphore(il);
775
776err:
777 if (ret)
778 il_eeprom_free(il);
779 /* Reset chip to save power until we load uCode during "up". */
780 il_apm_stop(il);
781alloc_err:
782 return ret;
783}
784EXPORT_SYMBOL(il_eeprom_init);
785
786void
787il_eeprom_free(struct il_priv *il)
788{
789 kfree(il->eeprom);
790 il->eeprom = NULL;
791}
792EXPORT_SYMBOL(il_eeprom_free);
793
794static void
795il_init_band_reference(const struct il_priv *il, int eep_band,
796 int *eeprom_ch_count,
797 const struct il_eeprom_channel **eeprom_ch_info,
798 const u8 **eeprom_ch_idx)
799{
800 u32 offset = il->cfg->regulatory_bands[eep_band - 1];
801
802 switch (eep_band) {
803 case 1: /* 2.4GHz band */
804 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
805 *eeprom_ch_info =
806 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
807 offset);
808 *eeprom_ch_idx = il_eeprom_band_1;
809 break;
810 case 2: /* 4.9GHz band */
811 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
812 *eeprom_ch_info =
813 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
814 offset);
815 *eeprom_ch_idx = il_eeprom_band_2;
816 break;
817 case 3: /* 5.2GHz band */
818 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
819 *eeprom_ch_info =
820 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
821 offset);
822 *eeprom_ch_idx = il_eeprom_band_3;
823 break;
824 case 4: /* 5.5GHz band */
825 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
826 *eeprom_ch_info =
827 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
828 offset);
829 *eeprom_ch_idx = il_eeprom_band_4;
830 break;
831 case 5: /* 5.7GHz band */
832 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
833 *eeprom_ch_info =
834 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
835 offset);
836 *eeprom_ch_idx = il_eeprom_band_5;
837 break;
838 case 6: /* 2.4GHz ht40 channels */
839 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
840 *eeprom_ch_info =
841 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
842 offset);
843 *eeprom_ch_idx = il_eeprom_band_6;
844 break;
845 case 7: /* 5 GHz ht40 channels */
846 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
847 *eeprom_ch_info =
848 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
849 offset);
850 *eeprom_ch_idx = il_eeprom_band_7;
851 break;
852 default:
853 BUG();
854 }
855}
856
857#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
858 ? # x " " : "")
859/**
860 * il_mod_ht40_chan_info - Copy ht40 channel info into the driver's private data (il).
861 *
862 * Does not set up a command, or touch hardware.
863 */
864static int
865il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
866 const struct il_eeprom_channel *eeprom_ch,
867 u8 clear_ht40_extension_channel)
868{
869 struct il_channel_info *ch_info;
870
871 ch_info =
872 (struct il_channel_info *)il_get_channel_info(il, band, channel);
873
874 if (!il_is_channel_valid(ch_info))
875 return -1;
876
877 D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
878 " Ad-Hoc %ssupported\n", ch_info->channel,
879 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
880 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
881 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
882 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
883 eeprom_ch->max_power_avg,
884 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
885 !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
886
887 ch_info->ht40_eeprom = *eeprom_ch;
888 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
889 ch_info->ht40_flags = eeprom_ch->flags;
890 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
891 ch_info->ht40_extension_channel &=
892 ~clear_ht40_extension_channel;
893
894 return 0;
895}
896
897#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
898 ? # x " " : "")
899
900/**
901 * il_init_channel_map - Set up driver's info for all possible channels
902 */
903int
904il_init_channel_map(struct il_priv *il)
905{
906 int eeprom_ch_count = 0;
907 const u8 *eeprom_ch_idx = NULL;
908 const struct il_eeprom_channel *eeprom_ch_info = NULL;
909 int band, ch;
910 struct il_channel_info *ch_info;
911
912 if (il->channel_count) {
913 D_EEPROM("Channel map already initialized.\n");
914 return 0;
915 }
916
917 D_EEPROM("Initializing regulatory info from EEPROM\n");
918
919 il->channel_count =
920 ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
921 ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
922 ARRAY_SIZE(il_eeprom_band_5);
923
924 D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
925
926 il->channel_info =
927 kzalloc(sizeof(struct il_channel_info) * il->channel_count,
928 GFP_KERNEL);
929 if (!il->channel_info) {
930 IL_ERR("Could not allocate channel_info\n");
931 il->channel_count = 0;
932 return -ENOMEM;
933 }
934
935 ch_info = il->channel_info;
936
937	/* Loop through the 5 EEPROM bands, adding them in order to the
938	 * channel map we maintain (which contains additional information
939	 * beyond what is in the EEPROM itself) */
940 for (band = 1; band <= 5; band++) {
941
942 il_init_band_reference(il, band, &eeprom_ch_count,
943 &eeprom_ch_info, &eeprom_ch_idx);
944
945 /* Loop through each band adding each of the channels */
946 for (ch = 0; ch < eeprom_ch_count; ch++) {
947 ch_info->channel = eeprom_ch_idx[ch];
948 ch_info->band =
949 (band ==
950 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
951
952 /* permanently store EEPROM's channel regulatory flags
953 * and max power in channel info database. */
954 ch_info->eeprom = eeprom_ch_info[ch];
955
956 /* Copy the run-time flags so they are there even on
957 * invalid channels */
958 ch_info->flags = eeprom_ch_info[ch].flags;
959 /* First write that ht40 is not enabled, and then enable
960 * one by one */
961 ch_info->ht40_extension_channel =
962 IEEE80211_CHAN_NO_HT40;
963
964 if (!(il_is_channel_valid(ch_info))) {
965 D_EEPROM("Ch. %d Flags %x [%sGHz] - "
966 "No traffic\n", ch_info->channel,
967 ch_info->flags,
968 il_is_channel_a_band(ch_info) ? "5.2" :
969 "2.4");
970 ch_info++;
971 continue;
972 }
973
974 /* Initialize regulatory-based run-time data */
975 ch_info->max_power_avg = ch_info->curr_txpow =
976 eeprom_ch_info[ch].max_power_avg;
977 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
978 ch_info->min_power = 0;
979
980 D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
981 " Ad-Hoc %ssupported\n", ch_info->channel,
982 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
983 CHECK_AND_PRINT_I(VALID),
984 CHECK_AND_PRINT_I(IBSS),
985 CHECK_AND_PRINT_I(ACTIVE),
986 CHECK_AND_PRINT_I(RADAR),
987 CHECK_AND_PRINT_I(WIDE),
988 CHECK_AND_PRINT_I(DFS),
989 eeprom_ch_info[ch].flags,
990 eeprom_ch_info[ch].max_power_avg,
991 ((eeprom_ch_info[ch].
992 flags & EEPROM_CHANNEL_IBSS) &&
993 !(eeprom_ch_info[ch].
994 flags & EEPROM_CHANNEL_RADAR)) ? "" :
995 "not ");
996
997 ch_info++;
998 }
999 }
1000
1001 /* Check if we do have HT40 channels */
1002 if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
1003 il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
1004 return 0;
1005
1006 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
1007 for (band = 6; band <= 7; band++) {
1008 enum ieee80211_band ieeeband;
1009
1010 il_init_band_reference(il, band, &eeprom_ch_count,
1011 &eeprom_ch_info, &eeprom_ch_idx);
1012
1013 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
1014 ieeeband =
1015 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1016
1017 /* Loop through each band adding each of the channels */
1018 for (ch = 0; ch < eeprom_ch_count; ch++) {
1019 /* Set up driver's info for lower half */
1020 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
1021 &eeprom_ch_info[ch],
1022 IEEE80211_CHAN_NO_HT40PLUS);
1023
1024 /* Set up driver's info for upper half */
1025 il_mod_ht40_chan_info(il, ieeeband,
1026 eeprom_ch_idx[ch] + 4,
1027 &eeprom_ch_info[ch],
1028 IEEE80211_CHAN_NO_HT40MINUS);
1029 }
1030 }
1031
1032 return 0;
1033}
1034EXPORT_SYMBOL(il_init_channel_map);
1035
1036/*
1037 * il_free_channel_map - undo allocations in il_init_channel_map
1038 */
1039void
1040il_free_channel_map(struct il_priv *il)
1041{
1042 kfree(il->channel_info);
1043 il->channel_count = 0;
1044}
1045EXPORT_SYMBOL(il_free_channel_map);
1046
1047/**
1048 * il_get_channel_info - Find driver's private channel info
1049 *
1050 * Based on band and channel number.
1051 */
1052const struct il_channel_info *
1053il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
1054 u16 channel)
1055{
1056 int i;
1057
1058 switch (band) {
1059 case IEEE80211_BAND_5GHZ:
1060 for (i = 14; i < il->channel_count; i++) {
1061 if (il->channel_info[i].channel == channel)
1062 return &il->channel_info[i];
1063 }
1064 break;
1065 case IEEE80211_BAND_2GHZ:
1066 if (channel >= 1 && channel <= 14)
1067 return &il->channel_info[channel - 1];
1068 break;
1069 default:
1070 BUG();
1071 }
1072
1073 return NULL;
1074}
1075EXPORT_SYMBOL(il_get_channel_info);
1076
1077/*
1078 * Setting power level allows the card to go to sleep when not busy.
1079 *
1080 * We calculate a sleep command based on the required latency, which
1081 * we get from mac80211.
1082 */
1083
1084#define SLP_VEC(X0, X1, X2, X3, X4) { \
1085 cpu_to_le32(X0), \
1086 cpu_to_le32(X1), \
1087 cpu_to_le32(X2), \
1088 cpu_to_le32(X3), \
1089 cpu_to_le32(X4) \
1090}
1091
1092static void
1093il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
1094{
1095 const __le32 interval[3][IL_POWER_VEC_SIZE] = {
1096 SLP_VEC(2, 2, 4, 6, 0xFF),
1097 SLP_VEC(2, 4, 7, 10, 10),
1098 SLP_VEC(4, 7, 10, 10, 0xFF)
1099 };
1100 int i, dtim_period, no_dtim;
1101 u32 max_sleep;
1102 bool skip;
1103
1104 memset(cmd, 0, sizeof(*cmd));
1105
1106 if (il->power_data.pci_pm)
1107 cmd->flags |= IL_POWER_PCI_PM_MSK;
1108
1109 /* if no Power Save, we are done */
1110 if (il->power_data.ps_disabled)
1111 return;
1112
1113 cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
1114 cmd->keep_alive_seconds = 0;
1115 cmd->debug_flags = 0;
1116 cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
1117 cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
1118 cmd->keep_alive_beacons = 0;
1119
1120 dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;
1121
1122 if (dtim_period <= 2) {
1123 memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
1124 no_dtim = 2;
1125 } else if (dtim_period <= 10) {
1126 memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
1127 no_dtim = 2;
1128 } else {
1129 memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
1130 no_dtim = 0;
1131 }
1132
1133 if (dtim_period == 0) {
1134 dtim_period = 1;
1135 skip = false;
1136 } else {
1137 skip = !!no_dtim;
1138 }
1139
1140 if (skip) {
1141 __le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];
1142
1143 max_sleep = le32_to_cpu(tmp);
1144 if (max_sleep == 0xFF)
1145 max_sleep = dtim_period * (skip + 1);
1146 else if (max_sleep > dtim_period)
1147 max_sleep = (max_sleep / dtim_period) * dtim_period;
1148 cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
1149 } else {
1150 max_sleep = dtim_period;
1151 cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
1152 }
1153
1154 for (i = 0; i < IL_POWER_VEC_SIZE; i++)
1155 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1156 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1157}
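/*
 * Worked example of the capping above (illustrative numbers): with
 * dtim_period = 3 the second vector {2, 4, 7, 10, 10} is selected and
 * no_dtim = 2, so DTIM periods may be skipped. The last entry (10) is
 * rounded down to a multiple of the DTIM period, max_sleep = (10 / 3) * 3
 * = 9, and the vector sent to uCode becomes {2, 4, 7, 9, 9} with
 * IL_POWER_SLEEP_OVER_DTIM_MSK set.
 */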
1158
1159static int
1160il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
1161{
1162 D_POWER("Sending power/sleep command\n");
1163 D_POWER("Flags value = 0x%08X\n", cmd->flags);
1164 D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1165 D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1166 D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1167 le32_to_cpu(cmd->sleep_interval[0]),
1168 le32_to_cpu(cmd->sleep_interval[1]),
1169 le32_to_cpu(cmd->sleep_interval[2]),
1170 le32_to_cpu(cmd->sleep_interval[3]),
1171 le32_to_cpu(cmd->sleep_interval[4]));
1172
1173 return il_send_cmd_pdu(il, C_POWER_TBL,
1174 sizeof(struct il_powertable_cmd), cmd);
1175}
1176
1177static int
1178il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
1179{
1180 int ret;
1181 bool update_chains;
1182
1183 lockdep_assert_held(&il->mutex);
1184
1185 /* Don't update the RX chain when chain noise calibration is running */
1186 update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
1187 il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
1188
1189 if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
1190 return 0;
1191
1192 if (!il_is_ready_rf(il))
1193 return -EIO;
1194
1195	/* scan complete uses sleep_cmd_next, so it needs to be kept updated */
1196 memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
1197 if (test_bit(S_SCANNING, &il->status) && !force) {
1198 D_INFO("Defer power set mode while scanning\n");
1199 return 0;
1200 }
1201
1202 if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
1203 set_bit(S_POWER_PMI, &il->status);
1204
1205 ret = il_set_power(il, cmd);
1206 if (!ret) {
1207 if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
1208 clear_bit(S_POWER_PMI, &il->status);
1209
1210 if (il->ops->update_chain_flags && update_chains)
1211 il->ops->update_chain_flags(il);
1212 else if (il->ops->update_chain_flags)
1213 D_POWER("Cannot update the power, chain noise "
1214 "calibration running: %d\n",
1215 il->chain_noise_data.state);
1216
1217 memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
1218 } else
1219 IL_ERR("set power fail, ret = %d", ret);
1220
1221 return ret;
1222}
1223
1224int
1225il_power_update_mode(struct il_priv *il, bool force)
1226{
1227 struct il_powertable_cmd cmd;
1228
1229 il_build_powertable_cmd(il, &cmd);
1230
1231 return il_power_set_mode(il, &cmd, force);
1232}
1233EXPORT_SYMBOL(il_power_update_mode);
1234
1235/* initialize to default */
1236void
1237il_power_initialize(struct il_priv *il)
1238{
1239 u16 lctl;
1240
1241 pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
1242 il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
1243
1244 il->power_data.debug_sleep_level_override = -1;
1245
1246 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
1247}
1248EXPORT_SYMBOL(il_power_initialize);
1249
1250/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
1251 * sending probe req. This should be set long enough to hear probe responses
1252 * from more than one AP. */
1253#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
1254#define IL_ACTIVE_DWELL_TIME_52 (20)
1255
1256#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
1257#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
1258
1259/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
1260 * Must be set longer than active dwell time.
1261 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
1262#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
1263#define IL_PASSIVE_DWELL_TIME_52 (10)
1264#define IL_PASSIVE_DWELL_BASE (100)
1265#define IL_CHANNEL_TUNE_TIME 5
1266
1267static int
1268il_send_scan_abort(struct il_priv *il)
1269{
1270 int ret;
1271 struct il_rx_pkt *pkt;
1272 struct il_host_cmd cmd = {
1273 .id = C_SCAN_ABORT,
1274 .flags = CMD_WANT_SKB,
1275 };
1276
1277 /* Exit instantly with error when device is not ready
1278 * to receive scan abort command or it does not perform
1279 * hardware scan currently */
1280 if (!test_bit(S_READY, &il->status) ||
1281 !test_bit(S_GEO_CONFIGURED, &il->status) ||
1282 !test_bit(S_SCAN_HW, &il->status) ||
1283 test_bit(S_FW_ERROR, &il->status) ||
1284 test_bit(S_EXIT_PENDING, &il->status))
1285 return -EIO;
1286
1287 ret = il_send_cmd_sync(il, &cmd);
1288 if (ret)
1289 return ret;
1290
1291 pkt = (struct il_rx_pkt *)cmd.reply_page;
1292 if (pkt->u.status != CAN_ABORT_STATUS) {
1293 /* The scan abort will return 1 for success or
1294 * 2 for "failure". A failure condition can be
1295 * due to simply not being in an active scan which
1296 * can occur if we send the scan abort before
1297 * the microcode has notified us that a scan is
1298 * completed. */
1299 D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
1300 ret = -EIO;
1301 }
1302
1303 il_free_pages(il, cmd.reply_page);
1304 return ret;
1305}
1306
1307static void
1308il_complete_scan(struct il_priv *il, bool aborted)
1309{
1310 /* check if scan was requested from mac80211 */
1311 if (il->scan_request) {
1312 D_SCAN("Complete scan in mac80211\n");
1313 ieee80211_scan_completed(il->hw, aborted);
1314 }
1315
1316 il->scan_vif = NULL;
1317 il->scan_request = NULL;
1318}
1319
1320void
1321il_force_scan_end(struct il_priv *il)
1322{
1323 lockdep_assert_held(&il->mutex);
1324
1325 if (!test_bit(S_SCANNING, &il->status)) {
1326 D_SCAN("Forcing scan end while not scanning\n");
1327 return;
1328 }
1329
1330 D_SCAN("Forcing scan end\n");
1331 clear_bit(S_SCANNING, &il->status);
1332 clear_bit(S_SCAN_HW, &il->status);
1333 clear_bit(S_SCAN_ABORTING, &il->status);
1334 il_complete_scan(il, true);
1335}
1336
1337static void
1338il_do_scan_abort(struct il_priv *il)
1339{
1340 int ret;
1341
1342 lockdep_assert_held(&il->mutex);
1343
1344 if (!test_bit(S_SCANNING, &il->status)) {
1345 D_SCAN("Not performing scan to abort\n");
1346 return;
1347 }
1348
1349 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
1350 D_SCAN("Scan abort in progress\n");
1351 return;
1352 }
1353
1354 ret = il_send_scan_abort(il);
1355 if (ret) {
1356 D_SCAN("Send scan abort failed %d\n", ret);
1357 il_force_scan_end(il);
1358 } else
1359 D_SCAN("Successfully send scan abort\n");
1360}
1361
1362/**
1363 * il_scan_cancel - Cancel any currently executing HW scan
1364 */
1365int
1366il_scan_cancel(struct il_priv *il)
1367{
1368 D_SCAN("Queuing abort scan\n");
1369 queue_work(il->workqueue, &il->abort_scan);
1370 return 0;
1371}
1372EXPORT_SYMBOL(il_scan_cancel);
1373
1374/**
1375 * il_scan_cancel_timeout - Cancel any currently executing HW scan
1376 * @ms: amount of time to wait (in milliseconds) for scan to abort
1377 *
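 * Returns 0 if the hardware scan was stopped within the timeout, non-zero
 * (the S_SCAN_HW status bit) if the scan is still active when time runs out.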
1378 */
1379int
1380il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1381{
1382 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1383
1384 lockdep_assert_held(&il->mutex);
1385
1386 D_SCAN("Scan cancel timeout\n");
1387
1388 il_do_scan_abort(il);
1389
1390 while (time_before_eq(jiffies, timeout)) {
1391 if (!test_bit(S_SCAN_HW, &il->status))
1392 break;
1393 msleep(20);
1394 }
1395
1396 return test_bit(S_SCAN_HW, &il->status);
1397}
1398EXPORT_SYMBOL(il_scan_cancel_timeout);
1399
1400/* Service response to C_SCAN (0x80) */
1401static void
1402il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
1403{
1404#ifdef CONFIG_IWLEGACY_DEBUG
1405 struct il_rx_pkt *pkt = rxb_addr(rxb);
1406 struct il_scanreq_notification *notif =
1407 (struct il_scanreq_notification *)pkt->u.raw;
1408
1409 D_SCAN("Scan request status = 0x%x\n", notif->status);
1410#endif
1411}
1412
1413/* Service N_SCAN_START (0x82) */
1414static void
1415il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
1416{
1417 struct il_rx_pkt *pkt = rxb_addr(rxb);
1418 struct il_scanstart_notification *notif =
1419 (struct il_scanstart_notification *)pkt->u.raw;
1420 il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
1421 D_SCAN("Scan start: " "%d [802.11%s] "
1422 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
1423 notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
1424 le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
1425}
1426
1427/* Service N_SCAN_RESULTS (0x83) */
1428static void
1429il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
1430{
1431#ifdef CONFIG_IWLEGACY_DEBUG
1432 struct il_rx_pkt *pkt = rxb_addr(rxb);
1433 struct il_scanresults_notification *notif =
1434 (struct il_scanresults_notification *)pkt->u.raw;
1435
1436 D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
1437 "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
1438 le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
1439 le32_to_cpu(notif->stats[0]),
1440 le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
1441#endif
1442}
1443
1444/* Service N_SCAN_COMPLETE (0x84) */
1445static void
1446il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
1447{
1448
1449#ifdef CONFIG_IWLEGACY_DEBUG
1450 struct il_rx_pkt *pkt = rxb_addr(rxb);
1451 struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
1452#endif
1453
1454 D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
1455 scan_notif->scanned_channels, scan_notif->tsf_low,
1456 scan_notif->tsf_high, scan_notif->status);
1457
1458 /* The HW is no longer scanning */
1459 clear_bit(S_SCAN_HW, &il->status);
1460
1461 D_SCAN("Scan on %sGHz took %dms\n",
1462 (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
1463 jiffies_to_msecs(jiffies - il->scan_start));
1464
1465 queue_work(il->workqueue, &il->scan_completed);
1466}
1467
1468void
1469il_setup_rx_scan_handlers(struct il_priv *il)
1470{
1471 /* scan handlers */
1472 il->handlers[C_SCAN] = il_hdl_scan;
1473 il->handlers[N_SCAN_START] = il_hdl_scan_start;
1474 il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
1475 il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
1476}
1477EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1478
1479u16
1480il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1481 u8 n_probes)
1482{
1483 if (band == IEEE80211_BAND_5GHZ)
1484 return IL_ACTIVE_DWELL_TIME_52 +
1485 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1486 else
1487 return IL_ACTIVE_DWELL_TIME_24 +
1488 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1489}
1490EXPORT_SYMBOL(il_get_active_dwell_time);
1491
1492u16
1493il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
1494 struct ieee80211_vif *vif)
1495{
1496 u16 value;
1497
1498 u16 passive =
1499 (band ==
1500 IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
1501 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
1502 IL_PASSIVE_DWELL_TIME_52;
1503
1504 if (il_is_any_associated(il)) {
1505 /*
1506 * If we're associated, we clamp the maximum passive
1507 * dwell time to be 98% of the smallest beacon interval
1508 * (minus 2 * channel tune time)
1509 */
1510 value = il->vif ? il->vif->bss_conf.beacon_int : 0;
1511 if (value > IL_PASSIVE_DWELL_BASE || !value)
1512 value = IL_PASSIVE_DWELL_BASE;
1513 value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
1514 passive = min(value, passive);
1515 }
1516
1517 return passive;
1518}
1519EXPORT_SYMBOL(il_get_passive_dwell_time);
1520
1521void
1522il_init_scan_params(struct il_priv *il)
1523{
1524 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1525 if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
1526 il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1527 if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
1528 il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
1529}
1530EXPORT_SYMBOL(il_init_scan_params);
1531
1532static int
1533il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
1534{
1535 int ret;
1536
1537 lockdep_assert_held(&il->mutex);
1538
1539 cancel_delayed_work(&il->scan_check);
1540
1541 if (!il_is_ready_rf(il)) {
1542 IL_WARN("Request scan called when driver not ready.\n");
1543 return -EIO;
1544 }
1545
1546 if (test_bit(S_SCAN_HW, &il->status)) {
1547 D_SCAN("Multiple concurrent scan requests in parallel.\n");
1548 return -EBUSY;
1549 }
1550
1551 if (test_bit(S_SCAN_ABORTING, &il->status)) {
1552 D_SCAN("Scan request while abort pending.\n");
1553 return -EBUSY;
1554 }
1555
1556 D_SCAN("Starting scan...\n");
1557
1558 set_bit(S_SCANNING, &il->status);
1559 il->scan_start = jiffies;
1560
1561 ret = il->ops->request_scan(il, vif);
1562 if (ret) {
1563 clear_bit(S_SCANNING, &il->status);
1564 return ret;
1565 }
1566
1567 queue_delayed_work(il->workqueue, &il->scan_check,
1568 IL_SCAN_CHECK_WATCHDOG);
1569
1570 return 0;
1571}
1572
1573int
1574il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1575 struct ieee80211_scan_request *hw_req)
1576{
1577 struct cfg80211_scan_request *req = &hw_req->req;
1578 struct il_priv *il = hw->priv;
1579 int ret;
1580
1581 if (req->n_channels == 0) {
1582 IL_ERR("Can not scan on no channels.\n");
1583 return -EINVAL;
1584 }
1585
1586 mutex_lock(&il->mutex);
1587 D_MAC80211("enter\n");
1588
1589 if (test_bit(S_SCANNING, &il->status)) {
1590 D_SCAN("Scan already in progress.\n");
1591 ret = -EAGAIN;
1592 goto out_unlock;
1593 }
1594
1595 /* mac80211 will only ask for one band at a time */
1596 il->scan_request = req;
1597 il->scan_vif = vif;
1598 il->scan_band = req->channels[0]->band;
1599
1600 ret = il_scan_initiate(il, vif);
1601
1602out_unlock:
1603 D_MAC80211("leave ret %d\n", ret);
1604 mutex_unlock(&il->mutex);
1605
1606 return ret;
1607}
1608EXPORT_SYMBOL(il_mac_hw_scan);
1609
1610static void
1611il_bg_scan_check(struct work_struct *data)
1612{
1613 struct il_priv *il =
1614 container_of(data, struct il_priv, scan_check.work);
1615
1616 D_SCAN("Scan check work\n");
1617
1618 /* Since we got here, the firmware did not finish the scan and
1619 * is most likely in bad shape, so don't bother sending an abort
1620 * command; just force scan complete towards mac80211 */
1621 mutex_lock(&il->mutex);
1622 il_force_scan_end(il);
1623 mutex_unlock(&il->mutex);
1624}
1625
1626/**
1627 * il_fill_probe_req - fill in all required fields and IE for probe request
1628 */
1629
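/* Illustrative layout of the frame this builds, assuming enough room:
 *
 *   [24-byte management header: probe req, broadcast DA/BSSID, SA = ta]
 *   [wildcard SSID IE: eid 0, len 0]
 *   [caller-supplied IEs, ie_len bytes]
 *
 * The returned length counts only what was actually written; 0 means the
 * buffer could not even hold the header and the wildcard SSID IE. */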
1630u16
1631il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1632 const u8 *ta, const u8 *ies, int ie_len, int left)
1633{
1634 int len = 0;
1635 u8 *pos = NULL;
1636
1637 /* Make sure there is enough space for the probe request,
1638 * two mandatory IEs and the data */
1639 left -= 24;
1640 if (left < 0)
1641 return 0;
1642
1643 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1644 eth_broadcast_addr(frame->da);
1645 memcpy(frame->sa, ta, ETH_ALEN);
1646 eth_broadcast_addr(frame->bssid);
1647 frame->seq_ctrl = 0;
1648
1649 len += 24;
1650
1651 /* ...next IE... */
1652 pos = &frame->u.probe_req.variable[0];
1653
1654 /* fill in our indirect SSID IE */
1655 left -= 2;
1656 if (left < 0)
1657 return 0;
1658 *pos++ = WLAN_EID_SSID;
1659 *pos++ = 0;
1660
1661 len += 2;
1662
1663 if (WARN_ON(left < ie_len))
1664 return len;
1665
1666 if (ies && ie_len) {
1667 memcpy(pos, ies, ie_len);
1668 len += ie_len;
1669 }
1670
1671 return (u16) len;
1672}
1673EXPORT_SYMBOL(il_fill_probe_req);
1674
1675static void
1676il_bg_abort_scan(struct work_struct *work)
1677{
1678 struct il_priv *il = container_of(work, struct il_priv, abort_scan);
1679
1680 D_SCAN("Abort scan work\n");
1681
1682 /* We keep the scan_check work queued in case the firmware does not
1683 * report back the scan completed notification */
1684 mutex_lock(&il->mutex);
1685 il_scan_cancel_timeout(il, 200);
1686 mutex_unlock(&il->mutex);
1687}
1688
1689static void
1690il_bg_scan_completed(struct work_struct *work)
1691{
1692 struct il_priv *il = container_of(work, struct il_priv, scan_completed);
1693 bool aborted;
1694
1695 D_SCAN("Completed scan.\n");
1696
1697 cancel_delayed_work(&il->scan_check);
1698
1699 mutex_lock(&il->mutex);
1700
1701 aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
1702 if (aborted)
1703 D_SCAN("Aborted scan completed.\n");
1704
1705 if (!test_and_clear_bit(S_SCANNING, &il->status)) {
1706 D_SCAN("Scan already completed.\n");
1707 goto out_settings;
1708 }
1709
1710 il_complete_scan(il, aborted);
1711
1712out_settings:
1713 /* Can we still talk to firmware ? */
1714 if (!il_is_ready_rf(il))
1715 goto out;
1716
1717 /*
1718 * We do not commit power settings while a scan is pending;
1719 * do it now if the settings changed.
1720 */
1721 il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
1722 il_set_tx_power(il, il->tx_power_next, false);
1723
1724 il->ops->post_scan(il);
1725
1726out:
1727 mutex_unlock(&il->mutex);
1728}
1729
1730void
1731il_setup_scan_deferred_work(struct il_priv *il)
1732{
1733 INIT_WORK(&il->scan_completed, il_bg_scan_completed);
1734 INIT_WORK(&il->abort_scan, il_bg_abort_scan);
1735 INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
1736}
1737EXPORT_SYMBOL(il_setup_scan_deferred_work);
1738
1739void
1740il_cancel_scan_deferred_work(struct il_priv *il)
1741{
1742 cancel_work_sync(&il->abort_scan);
1743 cancel_work_sync(&il->scan_completed);
1744
1745 if (cancel_delayed_work_sync(&il->scan_check)) {
1746 mutex_lock(&il->mutex);
1747 il_force_scan_end(il);
1748 mutex_unlock(&il->mutex);
1749 }
1750}
1751EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1752
1753/* il->sta_lock must be held */
1754static void
1755il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1756{
1757
1758 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1759 IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1760 sta_id, il->stations[sta_id].sta.sta.addr);
1761
1762 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1763 D_ASSOC("STA id %u addr %pM already present"
1764 " in uCode (according to driver)\n", sta_id,
1765 il->stations[sta_id].sta.sta.addr);
1766 } else {
1767 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1768 D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1769 il->stations[sta_id].sta.sta.addr);
1770 }
1771}
1772
1773static int
1774il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
1775 struct il_rx_pkt *pkt, bool sync)
1776{
1777 u8 sta_id = addsta->sta.sta_id;
1778 unsigned long flags;
1779 int ret = -EIO;
1780
1781 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
1782 IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
1783 return ret;
1784 }
1785
1786 D_INFO("Processing response for adding station %u\n", sta_id);
1787
1788 spin_lock_irqsave(&il->sta_lock, flags);
1789
1790 switch (pkt->u.add_sta.status) {
1791 case ADD_STA_SUCCESS_MSK:
1792 D_INFO("C_ADD_STA PASSED\n");
1793 il_sta_ucode_activate(il, sta_id);
1794 ret = 0;
1795 break;
1796 case ADD_STA_NO_ROOM_IN_TBL:
1797 IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
1798 break;
1799 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
1800 IL_ERR("Adding station %d failed, no block ack resource.\n",
1801 sta_id);
1802 break;
1803 case ADD_STA_MODIFY_NON_EXIST_STA:
1804 IL_ERR("Attempting to modify non-existing station %d\n",
1805 sta_id);
1806 break;
1807 default:
1808 D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
1809 break;
1810 }
1811
1812 D_INFO("%s station id %u addr %pM\n",
1813 il->stations[sta_id].sta.mode ==
1814 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
1815 il->stations[sta_id].sta.sta.addr);
1816
1817 /*
1818 * XXX: The MAC address in the command buffer is often changed from
1819 * the original sent to the device. That is, the MAC address
1820 * written to the command buffer often is not the same MAC address
1821 * read from the command buffer when the command returns. This
1822 * issue has not yet been resolved and this debugging is left to
1823 * observe the problem.
1824 */
1825 D_INFO("%s station according to cmd buffer %pM\n",
1826 il->stations[sta_id].sta.mode ==
1827 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
1828 spin_unlock_irqrestore(&il->sta_lock, flags);
1829
1830 return ret;
1831}
1832
1833static void
1834il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
1835 struct il_rx_pkt *pkt)
1836{
1837 struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
1838
1839 il_process_add_sta_resp(il, addsta, pkt, false);
1840
1841}
1842
1843int
1844il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1845{
1846 struct il_rx_pkt *pkt = NULL;
1847 int ret = 0;
1848 u8 data[sizeof(*sta)];
1849 struct il_host_cmd cmd = {
1850 .id = C_ADD_STA,
1851 .flags = flags,
1852 .data = data,
1853 };
1854 u8 sta_id __maybe_unused = sta->sta.sta_id;
1855
1856 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
1857 flags & CMD_ASYNC ? "a" : "");
1858
1859 if (flags & CMD_ASYNC)
1860 cmd.callback = il_add_sta_callback;
1861 else {
1862 cmd.flags |= CMD_WANT_SKB;
1863 might_sleep();
1864 }
1865
1866 cmd.len = il->ops->build_addsta_hcmd(sta, data);
1867 ret = il_send_cmd(il, &cmd);
1868
1869 if (ret || (flags & CMD_ASYNC))
1870 return ret;
1871
1872 if (ret == 0) {
1873 pkt = (struct il_rx_pkt *)cmd.reply_page;
1874 ret = il_process_add_sta_resp(il, sta, pkt, true);
1875 }
1876 il_free_pages(il, cmd.reply_page);
1877
1878 return ret;
1879}
1880EXPORT_SYMBOL(il_send_add_sta);
1881
1882static void
1883il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
1884{
1885 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
1886 __le32 sta_flags;
1887
1888 if (!sta || !sta_ht_inf->ht_supported)
1889 goto done;
1890
1891 D_ASSOC("spatial multiplexing power save mode: %s\n",
1892 (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
1893 (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
1894 "disabled");
1895
1896 sta_flags = il->stations[idx].sta.station_flags;
1897
1898 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
1899
1900 switch (sta->smps_mode) {
1901 case IEEE80211_SMPS_STATIC:
1902 sta_flags |= STA_FLG_MIMO_DIS_MSK;
1903 break;
1904 case IEEE80211_SMPS_DYNAMIC:
1905 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
1906 break;
1907 case IEEE80211_SMPS_OFF:
1908 break;
1909 default:
1910 IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
1911 break;
1912 }
1913
1914 sta_flags |=
1915 cpu_to_le32((u32) sta_ht_inf->
1916 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
1917
1918 sta_flags |=
1919 cpu_to_le32((u32) sta_ht_inf->
1920 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
1921
1922 if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
1923 sta_flags |= STA_FLG_HT40_EN_MSK;
1924 else
1925 sta_flags &= ~STA_FLG_HT40_EN_MSK;
1926
1927 il->stations[idx].sta.station_flags = sta_flags;
1928done:
1929 return;
1930}
1931
1932/**
1933 * il_prep_station - Prepare station information for addition
1934 *
1935 * should be called with sta_lock held
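 *
 * Returns the station id to use: IL_AP_ID for an AP, the broadcast id for
 * the broadcast address, otherwise a matching or free slot in the station
 * table; IL_INVALID_STATION if the table is full.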
1936 */
1937u8
1938il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
1939 struct ieee80211_sta *sta)
1940{
1941 struct il_station_entry *station;
1942 int i;
1943 u8 sta_id = IL_INVALID_STATION;
1944 u16 rate;
1945
1946 if (is_ap)
1947 sta_id = IL_AP_ID;
1948 else if (is_broadcast_ether_addr(addr))
1949 sta_id = il->hw_params.bcast_id;
1950 else
1951 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
1952 if (ether_addr_equal(il->stations[i].sta.sta.addr,
1953 addr)) {
1954 sta_id = i;
1955 break;
1956 }
1957
1958 if (!il->stations[i].used &&
1959 sta_id == IL_INVALID_STATION)
1960 sta_id = i;
1961 }
1962
1963 /*
1964 * These two conditions have the same outcome, but keep them
1965 * separate
1966 */
1967 if (unlikely(sta_id == IL_INVALID_STATION))
1968 return sta_id;
1969
1970 /*
1971 * uCode is not able to deal with multiple requests to add a
1972 * station. Keep track if one is in progress so that we do not send
1973 * another.
1974 */
1975 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
1976 D_INFO("STA %d already in process of being added.\n", sta_id);
1977 return sta_id;
1978 }
1979
1980 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1981 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
1982 ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
1983 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1984 sta_id, addr);
1985 return sta_id;
1986 }
1987
1988 station = &il->stations[sta_id];
1989 station->used = IL_STA_DRIVER_ACTIVE;
1990 D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
1991 il->num_stations++;
1992
1993 /* Set up the C_ADD_STA command to send to device */
1994 memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
1995 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
1996 station->sta.mode = 0;
1997 station->sta.sta.sta_id = sta_id;
1998 station->sta.station_flags = 0;
1999
2000 /*
2001 * OK to call unconditionally, since local stations (IBSS BSSID
2002 * STA and broadcast STA) pass in a NULL sta, and mac80211
2003 * doesn't allow HT IBSS.
2004 */
2005 il_set_ht_add_station(il, sta_id, sta);
2006
2007 /* 3945 only */
2008 rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
2009 /* Turn on both antennas for the station... */
2010 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
2011
2012 return sta_id;
2013
2014}
2015EXPORT_SYMBOL_GPL(il_prep_station);
2016
2017#define STA_WAIT_TIMEOUT (HZ/2)
2018
2019/**
2020 * il_add_station_common - add a station to the driver and uCode tables
2021 */
2022int
2023il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
2024 struct ieee80211_sta *sta, u8 *sta_id_r)
2025{
2026 unsigned long flags_spin;
2027 int ret = 0;
2028 u8 sta_id;
2029 struct il_addsta_cmd sta_cmd;
2030
2031 *sta_id_r = 0;
2032 spin_lock_irqsave(&il->sta_lock, flags_spin);
2033 sta_id = il_prep_station(il, addr, is_ap, sta);
2034 if (sta_id == IL_INVALID_STATION) {
2035 IL_ERR("Unable to prepare station %pM for addition\n", addr);
2036 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2037 return -EINVAL;
2038 }
2039
2040 /*
2041 * uCode is not able to deal with multiple requests to add a
2042 * station. Keep track if one is in progress so that we do not send
2043 * another.
2044 */
2045 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
2046 D_INFO("STA %d already in process of being added.\n", sta_id);
2047 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2048 return -EEXIST;
2049 }
2050
2051 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
2052 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
2053 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
2054 sta_id, addr);
2055 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2056 return -EEXIST;
2057 }
2058
2059 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
2060 memcpy(&sta_cmd, &il->stations[sta_id].sta,
2061 sizeof(struct il_addsta_cmd));
2062 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2063
2064 /* Add station to device's station table */
2065 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2066 if (ret) {
2067 spin_lock_irqsave(&il->sta_lock, flags_spin);
2068 IL_ERR("Adding station %pM failed.\n",
2069 il->stations[sta_id].sta.sta.addr);
2070 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2071 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2072 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2073 }
2074 *sta_id_r = sta_id;
2075 return ret;
2076}
2077EXPORT_SYMBOL(il_add_station_common);
2078
2079/**
2080 * il_sta_ucode_deactivate - deactivate ucode status for a station
2081 *
2082 * il->sta_lock must be held
2083 */
2084static void
2085il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
2086{
2087 /* uCode must be active and the driver must not be active */
2088 if ((il->stations[sta_id].
2089 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
2090 IL_STA_UCODE_ACTIVE)
2091 IL_ERR("removed non active STA %u\n", sta_id);
2092
2093 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
2094
2095 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
2096 D_ASSOC("Removed STA %u\n", sta_id);
2097}
2098
2099static int
2100il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
2101 bool temporary)
2102{
2103 struct il_rx_pkt *pkt;
2104 int ret;
2105
2106 unsigned long flags_spin;
2107 struct il_rem_sta_cmd rm_sta_cmd;
2108
2109 struct il_host_cmd cmd = {
2110 .id = C_REM_STA,
2111 .len = sizeof(struct il_rem_sta_cmd),
2112 .flags = CMD_SYNC,
2113 .data = &rm_sta_cmd,
2114 };
2115
2116 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
2117 rm_sta_cmd.num_sta = 1;
2118 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
2119
2120 cmd.flags |= CMD_WANT_SKB;
2121
2122 ret = il_send_cmd(il, &cmd);
2123
2124 if (ret)
2125 return ret;
2126
2127 pkt = (struct il_rx_pkt *)cmd.reply_page;
2128 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
2129 IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
2130 ret = -EIO;
2131 }
2132
2133 if (!ret) {
2134 switch (pkt->u.rem_sta.status) {
2135 case REM_STA_SUCCESS_MSK:
2136 if (!temporary) {
2137 spin_lock_irqsave(&il->sta_lock, flags_spin);
2138 il_sta_ucode_deactivate(il, sta_id);
2139 spin_unlock_irqrestore(&il->sta_lock,
2140 flags_spin);
2141 }
2142 D_ASSOC("C_REM_STA PASSED\n");
2143 break;
2144 default:
2145 ret = -EIO;
2146 IL_ERR("C_REM_STA failed\n");
2147 break;
2148 }
2149 }
2150 il_free_pages(il, cmd.reply_page);
2151
2152 return ret;
2153}
2154
2155/**
2156 * il_remove_station - Remove driver's knowledge of station.
2157 */
2158int
2159il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
2160{
2161 unsigned long flags;
2162
2163 if (!il_is_ready(il)) {
2164 D_INFO("Unable to remove station %pM, device not ready.\n",
2165 addr);
2166 /*
2167 * It is typical for stations to be removed when we are
2168 * going down. Return success since device will be down
2169 * soon anyway
2170 */
2171 return 0;
2172 }
2173
2174 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
2175
2176 if (WARN_ON(sta_id == IL_INVALID_STATION))
2177 return -EINVAL;
2178
2179 spin_lock_irqsave(&il->sta_lock, flags);
2180
2181 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2182 D_INFO("Removing %pM but non DRIVER active\n", addr);
2183 goto out_err;
2184 }
2185
2186 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
2187 D_INFO("Removing %pM but non UCODE active\n", addr);
2188 goto out_err;
2189 }
2190
2191 if (il->stations[sta_id].used & IL_STA_LOCAL) {
2192 kfree(il->stations[sta_id].lq);
2193 il->stations[sta_id].lq = NULL;
2194 }
2195
2196 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2197
2198 il->num_stations--;
2199
2200 BUG_ON(il->num_stations < 0);
2201
2202 spin_unlock_irqrestore(&il->sta_lock, flags);
2203
2204 return il_send_remove_station(il, addr, sta_id, false);
2205out_err:
2206 spin_unlock_irqrestore(&il->sta_lock, flags);
2207 return -EINVAL;
2208}
2209EXPORT_SYMBOL_GPL(il_remove_station);
2210
2211/**
2212 * il_clear_ucode_stations - clear ucode station table bits
2213 *
2214 * This function clears all the bits in the driver indicating
2215 * which stations are active in the ucode. Call when something
2216 * other than explicit station management would cause this in
2217 * the ucode, e.g. unassociated RXON.
2218 */
2219void
2220il_clear_ucode_stations(struct il_priv *il)
2221{
2222 int i;
2223 unsigned long flags_spin;
2224 bool cleared = false;
2225
2226 D_INFO("Clearing ucode stations in driver\n");
2227
2228 spin_lock_irqsave(&il->sta_lock, flags_spin);
2229 for (i = 0; i < il->hw_params.max_stations; i++) {
2230 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2231 D_INFO("Clearing ucode active for station %d\n", i);
2232 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2233 cleared = true;
2234 }
2235 }
2236 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2237
2238 if (!cleared)
2239 D_INFO("No active stations found to be cleared\n");
2240}
2241EXPORT_SYMBOL(il_clear_ucode_stations);
2242
2243/**
2244 * il_restore_stations() - Restore driver known stations to device
2245 *
2246 * All stations considered active by the driver, but not present in the
2247 * ucode, are restored.
2248 *
2249 * Function sleeps.
2250 */
2251void
2252il_restore_stations(struct il_priv *il)
2253{
2254 struct il_addsta_cmd sta_cmd;
2255 struct il_link_quality_cmd lq;
2256 unsigned long flags_spin;
2257 int i;
2258 bool found = false;
2259 int ret;
2260 bool send_lq;
2261
2262 if (!il_is_ready(il)) {
2263 D_INFO("Not ready yet, not restoring any stations.\n");
2264 return;
2265 }
2266
2267 D_ASSOC("Restoring all known stations ... start.\n");
2268 spin_lock_irqsave(&il->sta_lock, flags_spin);
2269 for (i = 0; i < il->hw_params.max_stations; i++) {
2270 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
2271 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
2272 D_ASSOC("Restoring sta %pM\n",
2273 il->stations[i].sta.sta.addr);
2274 il->stations[i].sta.mode = 0;
2275 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
2276 found = true;
2277 }
2278 }
2279
2280 for (i = 0; i < il->hw_params.max_stations; i++) {
2281 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
2282 memcpy(&sta_cmd, &il->stations[i].sta,
2283 sizeof(struct il_addsta_cmd));
2284 send_lq = false;
2285 if (il->stations[i].lq) {
2286 memcpy(&lq, il->stations[i].lq,
2287 sizeof(struct il_link_quality_cmd));
2288 send_lq = true;
2289 }
2290 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2291 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2292 if (ret) {
2293 spin_lock_irqsave(&il->sta_lock, flags_spin);
2294 IL_ERR("Adding station %pM failed.\n",
2295 il->stations[i].sta.sta.addr);
2296 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
2297 il->stations[i].used &=
2298 ~IL_STA_UCODE_INPROGRESS;
2299 spin_unlock_irqrestore(&il->sta_lock,
2300 flags_spin);
2301 }
2302 /*
2303 * Rate scaling has already been initialized, send
2304 * current LQ command
2305 */
2306 if (send_lq)
2307 il_send_lq_cmd(il, &lq, CMD_SYNC, true);
2308 spin_lock_irqsave(&il->sta_lock, flags_spin);
2309 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
2310 }
2311 }
2312
2313 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2314 if (!found)
2315 D_INFO("Restoring all known stations"
2316 " .... no stations to be restored.\n");
2317 else
2318 D_INFO("Restoring all known stations" " .... complete.\n");
2319}
2320EXPORT_SYMBOL(il_restore_stations);
2321
2322int
2323il_get_free_ucode_key_idx(struct il_priv *il)
2324{
2325 int i;
2326
2327 for (i = 0; i < il->sta_key_max_num; i++)
2328 if (!test_and_set_bit(i, &il->ucode_key_table))
2329 return i;
2330
2331 return WEP_INVALID_OFFSET;
2332}
2333EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2334
2335void
2336il_dealloc_bcast_stations(struct il_priv *il)
2337{
2338 unsigned long flags;
2339 int i;
2340
2341 spin_lock_irqsave(&il->sta_lock, flags);
2342 for (i = 0; i < il->hw_params.max_stations; i++) {
2343 if (!(il->stations[i].used & IL_STA_BCAST))
2344 continue;
2345
2346 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2347 il->num_stations--;
2348 BUG_ON(il->num_stations < 0);
2349 kfree(il->stations[i].lq);
2350 il->stations[i].lq = NULL;
2351 }
2352 spin_unlock_irqrestore(&il->sta_lock, flags);
2353}
2354EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2355
2356#ifdef CONFIG_IWLEGACY_DEBUG
2357static void
2358il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
2359{
2360 int i;
2361 D_RATE("lq station id 0x%x\n", lq->sta_id);
2362 D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
2363 lq->general_params.dual_stream_ant_msk);
2364
2365 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
2366 D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
2367}
2368#else
2369static inline void
2370il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
2371{
2372}
2373#endif
2374
2375/**
2376 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
2377 *
2378 * It sometimes happens that when an HT rate has been in use and we
2379 * lose connectivity with the AP, mac80211 will first tell us that the
2380 * current channel is not HT anymore before removing the station. In such a
2381 * scenario the RXON flags will be updated to indicate we are not
2382 * communicating HT anymore, but the LQ command may still contain HT rates.
2383 * Test for this to prevent driver from sending LQ command between the time
2384 * RXON flags are updated and when LQ command is updated.
2385 */
2386static bool
2387il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
2388{
2389 int i;
2390
2391 if (il->ht.enabled)
2392 return true;
2393
2394 D_INFO("Channel %u is not an HT channel\n", il->active.channel);
2395 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2396 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
2397 D_INFO("idx %d of LQ expects HT channel\n", i);
2398 return false;
2399 }
2400 }
2401 return true;
2402}
2403
2404/**
2405 * il_send_lq_cmd() - Send link quality command
2406 * @init: This command is sent as part of station initialization right
2407 * after station has been added.
2408 *
2409 * The link quality command is sent as the last step of station creation.
2410 * This is the special case in which init is set and we call a callback in
2411 * this case to clear the state indicating that station creation is in
2412 * progress.
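 *
 * Returns 0 on success, -EINVAL if the station is not known to the driver or
 * the LQ table still contains HT rates on a non-HT channel, otherwise the
 * error returned when sending the command.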
2413 */
2414int
2415il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
2416 u8 flags, bool init)
2417{
2418 int ret = 0;
2419 unsigned long flags_spin;
2420
2421 struct il_host_cmd cmd = {
2422 .id = C_TX_LINK_QUALITY_CMD,
2423 .len = sizeof(struct il_link_quality_cmd),
2424 .flags = flags,
2425 .data = lq,
2426 };
2427
2428 if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
2429 return -EINVAL;
2430
2431 spin_lock_irqsave(&il->sta_lock, flags_spin);
2432 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2433 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2434 return -EINVAL;
2435 }
2436 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2437
2438 il_dump_lq_cmd(il, lq);
2439 BUG_ON(init && (cmd.flags & CMD_ASYNC));
2440
2441 if (il_is_lq_table_valid(il, lq))
2442 ret = il_send_cmd(il, &cmd);
2443 else
2444 ret = -EINVAL;
2445
2446 if (cmd.flags & CMD_ASYNC)
2447 return ret;
2448
2449 if (init) {
2450 D_INFO("init LQ command complete,"
2451 " clearing sta addition status for sta %d\n",
2452 lq->sta_id);
2453 spin_lock_irqsave(&il->sta_lock, flags_spin);
2454 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2455 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2456 }
2457 return ret;
2458}
2459EXPORT_SYMBOL(il_send_lq_cmd);
2460
2461int
2462il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2463 struct ieee80211_sta *sta)
2464{
2465 struct il_priv *il = hw->priv;
2466 struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
2467 int ret;
2468
2469 mutex_lock(&il->mutex);
2470 D_MAC80211("enter station %pM\n", sta->addr);
2471
2472 ret = il_remove_station(il, sta_common->sta_id, sta->addr);
2473 if (ret)
2474 IL_ERR("Error removing station %pM\n", sta->addr);
2475
2476 D_MAC80211("leave ret %d\n", ret);
2477 mutex_unlock(&il->mutex);
2478
2479 return ret;
2480}
2481EXPORT_SYMBOL(il_mac_sta_remove);
2482
2483/************************** RX-FUNCTIONS ****************************/
2484/*
2485 * Rx theory of operation
2486 *
2487 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
2488 * each of which point to Receive Buffers to be filled by the NIC. These get
2489 * used not only for Rx frames, but for any command response or notification
2490 * from the NIC. The driver and NIC manage the Rx buffers by means
2491 * of idxes into the circular buffer.
2492 *
2493 * Rx Queue Indexes
2494 * The host/firmware share two idx registers for managing the Rx buffers.
2495 *
2496 * The READ idx maps to the first position that the firmware may be writing
2497 * to -- the driver can read up to (but not including) this position and get
2498 * good data.
2499 * The READ idx is managed by the firmware once the card is enabled.
2500 *
2501 * The WRITE idx maps to the last position the driver has read from -- the
2502 * position preceding WRITE is the last slot the firmware can place a packet.
2503 *
2504 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2505 * WRITE = READ.
2506 *
2507 * During initialization, the host sets up the READ queue position to the first
2508 * IDX position, and WRITE to the last (READ - 1 wrapped)
2509 *
2510 * When the firmware places a packet in a buffer, it will advance the READ idx
2511 * and fire the RX interrupt. The driver can then query the READ idx and
2512 * process as many packets as possible, moving the WRITE idx forward as it
2513 * resets the Rx queue buffers with new memory.
2514 *
2515 * The management in the driver is as follows:
2516 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2517 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2518 * to replenish the iwl->rxq->rx_free.
2519 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
2520 * iwl->rxq is replenished and the READ IDX is updated (updating the
2521 * 'processed' and 'read' driver idxes as well)
2522 * + A received packet is processed and handed to the kernel network stack,
2523 * detached from the iwl->rxq. The driver 'processed' idx is updated.
2524 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2525 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2526 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
2527 * were enough free buffers and RX_STALLED is set it is cleared.
2528 *
2529 *
2530 * Driver sequence:
2531 *
2532 * il_rx_queue_alloc() Allocates rx_free
2533 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
2534 * il_rx_queue_restock
2535 * il_rx_queue_restock() Moves available buffers from rx_free into Rx
2536 * queue, updates firmware pointers, and updates
2537 * the WRITE idx. If insufficient rx_free buffers
2538 * are available, schedules il_rx_replenish
2539 *
2540 * -- enable interrupts --
2541 * ISR - il_rx() Detach il_rx_bufs from pool up to the
2542 * READ IDX, detaching the SKB from the pool.
2543 * Moves the packet buffer from queue to rx_used.
2544 * Calls il_rx_queue_restock to refill any empty
2545 * slots.
2546 * ...
2547 *
2548 */
2549
2550/**
2551 * il_rx_queue_space - Return number of free slots available in queue.
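 *
 * Illustrative (assuming an RX_QUEUE_SIZE of 256): with read = 10 and
 * write = 250 the wrap gives 10 - 250 + 256 = 16 slots, and the 2-slot
 * guard band leaves 14 usable slots.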
2552 */
2553int
2554il_rx_queue_space(const struct il_rx_queue *q)
2555{
2556 int s = q->read - q->write;
2557 if (s <= 0)
2558 s += RX_QUEUE_SIZE;
2559 /* keep some buffer to not confuse full and empty queue */
2560 s -= 2;
2561 if (s < 0)
2562 s = 0;
2563 return s;
2564}
2565EXPORT_SYMBOL(il_rx_queue_space);
2566
2567/**
2568 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
2569 */
2570void
2571il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
2572{
2573 unsigned long flags;
2574 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
2575 u32 reg;
2576
2577 spin_lock_irqsave(&q->lock, flags);
2578
2579 if (q->need_update == 0)
2580 goto exit_unlock;
2581
2582 /* If power-saving is in use, make sure device is awake */
2583 if (test_bit(S_POWER_PMI, &il->status)) {
2584 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2585
2586 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2587 D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
2588 reg);
2589 il_set_bit(il, CSR_GP_CNTRL,
2590 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2591 goto exit_unlock;
2592 }
2593
2594 q->write_actual = (q->write & ~0x7);
2595 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2596
2597 /* Else device is assumed to be awake */
2598 } else {
2599 /* Device expects a multiple of 8 */
2600 q->write_actual = (q->write & ~0x7);
2601 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2602 }
2603
2604 q->need_update = 0;
2605
2606exit_unlock:
2607 spin_unlock_irqrestore(&q->lock, flags);
2608}
2609EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2610
2611int
2612il_rx_queue_alloc(struct il_priv *il)
2613{
2614 struct il_rx_queue *rxq = &il->rxq;
2615 struct device *dev = &il->pci_dev->dev;
2616 int i;
2617
2618 spin_lock_init(&rxq->lock);
2619 INIT_LIST_HEAD(&rxq->rx_free);
2620 INIT_LIST_HEAD(&rxq->rx_used);
2621
2622 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2623 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2624 GFP_KERNEL);
2625 if (!rxq->bd)
2626 goto err_bd;
2627
2628 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2629 &rxq->rb_stts_dma, GFP_KERNEL);
2630 if (!rxq->rb_stts)
2631 goto err_rb;
2632
2633 /* Fill the rx_used queue with _all_ of the Rx buffers */
2634 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2635 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2636
2637 /* Set us so that we have processed and used all buffers, but have
2638 * not restocked the Rx queue with fresh buffers */
2639 rxq->read = rxq->write = 0;
2640 rxq->write_actual = 0;
2641 rxq->free_count = 0;
2642 rxq->need_update = 0;
2643 return 0;
2644
2645err_rb:
2646 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2647 rxq->bd_dma);
2648err_bd:
2649 return -ENOMEM;
2650}
2651EXPORT_SYMBOL(il_rx_queue_alloc);
2652
2653void
2654il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
2655{
2656 struct il_rx_pkt *pkt = rxb_addr(rxb);
2657 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
2658
2659 if (!report->state) {
2660 D_11H("Spectrum Measure Notification: Start\n");
2661 return;
2662 }
2663
2664 memcpy(&il->measure_report, report, sizeof(*report));
2665 il->measurement_status |= MEASUREMENT_READY;
2666}
2667EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2668
2669/*
2670 * returns non-zero if packet should be dropped
2671 */
2672int
2673il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
2674 u32 decrypt_res, struct ieee80211_rx_status *stats)
2675{
2676 u16 fc = le16_to_cpu(hdr->frame_control);
2677
2678 /*
2679 * All contexts have the same setting here due to it being
2680 * a module parameter, so OK to check any context.
2681 */
2682 if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2683 return 0;
2684
2685 if (!(fc & IEEE80211_FCTL_PROTECTED))
2686 return 0;
2687
2688 D_RX("decrypt_res:0x%x\n", decrypt_res);
2689 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2690 case RX_RES_STATUS_SEC_TYPE_TKIP:
2691 /* The uCode has got a bad phase 1 Key, pushes the packet.
2692 * Decryption will be done in SW. */
2693 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2694 RX_RES_STATUS_BAD_KEY_TTAK)
2695 break;
2696
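/* fall through - TTAK was fine, check decrypt status like WEP/CCMP below */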
2697 case RX_RES_STATUS_SEC_TYPE_WEP:
2698 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2699 RX_RES_STATUS_BAD_ICV_MIC) {
2700 /* bad ICV, the packet is destroyed since the
2701 * decryption is inplace, drop it */
2702 D_RX("Packet destroyed\n");
2703 return -1;
2704 }
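/* fall through - ICV was fine, check whether hardware decryption succeeded */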
2705 case RX_RES_STATUS_SEC_TYPE_CCMP:
2706 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2707 RX_RES_STATUS_DECRYPT_OK) {
2708 D_RX("hw decrypt successfully!!!\n");
2709 stats->flag |= RX_FLAG_DECRYPTED;
2710 }
2711 break;
2712
2713 default:
2714 break;
2715 }
2716 return 0;
2717}
2718EXPORT_SYMBOL(il_set_decrypted_flag);
2719
2720/**
2721 * il_txq_update_write_ptr - Send new write idx to hardware
2722 */
2723void
2724il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
2725{
2726 u32 reg = 0;
2727 int txq_id = txq->q.id;
2728
2729 if (txq->need_update == 0)
2730 return;
2731
2732 /* if we're trying to save power */
2733 if (test_bit(S_POWER_PMI, &il->status)) {
2734 /* wake up nic if it's powered down ...
2735 * uCode will wake up, and interrupt us again, so next
2736 * time we'll skip this part. */
2737 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2738
2739 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2740 D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
2741 txq_id, reg);
2742 il_set_bit(il, CSR_GP_CNTRL,
2743 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2744 return;
2745 }
2746
2747 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2748
2749 /*
2750 * else not in power-save mode,
2751 * uCode will never sleep when we're
2752 * trying to tx (during RFKILL, we're not trying to tx).
2753 */
2754 } else
2755 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2756 txq->need_update = 0;
2757}
2758EXPORT_SYMBOL(il_txq_update_write_ptr);
2759
2760/**
2761 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
2762 */
2763void
2764il_tx_queue_unmap(struct il_priv *il, int txq_id)
2765{
2766 struct il_tx_queue *txq = &il->txq[txq_id];
2767 struct il_queue *q = &txq->q;
2768
2769 if (q->n_bd == 0)
2770 return;
2771
2772 while (q->write_ptr != q->read_ptr) {
2773 il->ops->txq_free_tfd(il, txq);
2774 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2775 }
2776}
2777EXPORT_SYMBOL(il_tx_queue_unmap);
2778
2779/**
2780 * il_tx_queue_free - Deallocate DMA queue.
2781 * @txq: Transmit queue to deallocate.
2782 *
2783 * Empty queue by removing and destroying all BD's.
2784 * Free all buffers.
2785 * 0-fill, but do not free "txq" descriptor structure.
2786 */
2787void
2788il_tx_queue_free(struct il_priv *il, int txq_id)
2789{
2790 struct il_tx_queue *txq = &il->txq[txq_id];
2791 struct device *dev = &il->pci_dev->dev;
2792 int i;
2793
2794 il_tx_queue_unmap(il, txq_id);
2795
2796 /* De-alloc array of command/tx buffers */
2797 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
2798 kfree(txq->cmd[i]);
2799
2800 /* De-alloc circular buffer of TFDs */
2801 if (txq->q.n_bd)
2802 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2803 txq->tfds, txq->q.dma_addr);
2804
2805 /* De-alloc array of per-TFD driver data */
2806 kfree(txq->skbs);
2807 txq->skbs = NULL;
2808
2809 /* deallocate arrays */
2810 kfree(txq->cmd);
2811 kfree(txq->meta);
2812 txq->cmd = NULL;
2813 txq->meta = NULL;
2814
2815 /* 0-fill queue descriptor structure */
2816 memset(txq, 0, sizeof(*txq));
2817}
2818EXPORT_SYMBOL(il_tx_queue_free);
2819
2820/**
2821 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
2822 */
2823void
2824il_cmd_queue_unmap(struct il_priv *il)
2825{
2826 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2827 struct il_queue *q = &txq->q;
2828 int i;
2829
2830 if (q->n_bd == 0)
2831 return;
2832
2833 while (q->read_ptr != q->write_ptr) {
2834 i = il_get_cmd_idx(q, q->read_ptr, 0);
2835
2836 if (txq->meta[i].flags & CMD_MAPPED) {
2837 pci_unmap_single(il->pci_dev,
2838 dma_unmap_addr(&txq->meta[i], mapping),
2839 dma_unmap_len(&txq->meta[i], len),
2840 PCI_DMA_BIDIRECTIONAL);
2841 txq->meta[i].flags = 0;
2842 }
2843
2844 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2845 }
2846
2847 i = q->n_win;
2848 if (txq->meta[i].flags & CMD_MAPPED) {
2849 pci_unmap_single(il->pci_dev,
2850 dma_unmap_addr(&txq->meta[i], mapping),
2851 dma_unmap_len(&txq->meta[i], len),
2852 PCI_DMA_BIDIRECTIONAL);
2853 txq->meta[i].flags = 0;
2854 }
2855}
2856EXPORT_SYMBOL(il_cmd_queue_unmap);
2857
2858/**
2859 * il_cmd_queue_free - Deallocate DMA queue.
2860 * @txq: Transmit queue to deallocate.
2861 *
2862 * Empty queue by removing and destroying all BD's.
2863 * Free all buffers.
2864 * 0-fill, but do not free "txq" descriptor structure.
2865 */
2866void
2867il_cmd_queue_free(struct il_priv *il)
2868{
2869 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2870 struct device *dev = &il->pci_dev->dev;
2871 int i;
2872
2873 il_cmd_queue_unmap(il);
2874
2875 /* De-alloc array of command/tx buffers */
2876 for (i = 0; i <= TFD_CMD_SLOTS; i++)
2877 kfree(txq->cmd[i]);
2878
2879 /* De-alloc circular buffer of TFDs */
2880 if (txq->q.n_bd)
2881 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2882 txq->tfds, txq->q.dma_addr);
2883
2884 /* deallocate arrays */
2885 kfree(txq->cmd);
2886 kfree(txq->meta);
2887 txq->cmd = NULL;
2888 txq->meta = NULL;
2889
2890 /* 0-fill queue descriptor structure */
2891 memset(txq, 0, sizeof(*txq));
2892}
2893EXPORT_SYMBOL(il_cmd_queue_free);
2894
2895/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
2896 * DMA services
2897 *
2898 * Theory of operation
2899 *
2900 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
2901 * of buffer descriptors, each of which points to one or more data buffers for
2902 * the device to read from or fill. Driver and device exchange status of each
2903 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
2904 * entries in each circular buffer, to protect against confusing empty and full
2905 * queue states.
2906 *
2907 * The device reads or writes the data in the queues via the device's several
2908 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
2909 *
2910 * For Tx queues, there are low mark and high mark limits. If, after queuing
2911 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
2912 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
2913 * > high mark, the Tx queue is resumed.
2914 *
2915 * See more detailed info in 4965.h.
2916 ***************************************************/
2917
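/* Illustrative only: with n_bd = 256 and, say, a 32-entry window (n_win = 32),
 * read_ptr = 90 and write_ptr = 100, s starts at -10, wraps to -10 + 32 = 22,
 * and the 2-slot guard leaves 20 free window slots. */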
2918int
2919il_queue_space(const struct il_queue *q)
2920{
2921 int s = q->read_ptr - q->write_ptr;
2922
2923 if (q->read_ptr > q->write_ptr)
2924 s -= q->n_bd;
2925
2926 if (s <= 0)
2927 s += q->n_win;
2928 /* keep some reserve to not confuse empty and full situations */
2929 s -= 2;
2930 if (s < 0)
2931 s = 0;
2932 return s;
2933}
2934EXPORT_SYMBOL(il_queue_space);
2935
2936
2937/**
2938 * il_queue_init - Initialize queue's high/low-water and read/write idxes
2939 */
2940static int
2941il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
2942{
2943 /*
2944 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
2945 * il_queue_inc_wrap and il_queue_dec_wrap are broken.
2946 */
2947 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
2948 /* FIXME: remove q->n_bd */
2949 q->n_bd = TFD_QUEUE_SIZE_MAX;
2950
2951 q->n_win = slots;
2952 q->id = id;
2953
2954 /* slots must be a power-of-two size, otherwise
2955 * il_get_cmd_idx is broken. */
2956 BUG_ON(!is_power_of_2(slots));
2957
2958 q->low_mark = q->n_win / 4;
2959 if (q->low_mark < 4)
2960 q->low_mark = 4;
2961
2962 q->high_mark = q->n_win / 8;
2963 if (q->high_mark < 2)
2964 q->high_mark = 2;
2965
2966 q->write_ptr = q->read_ptr = 0;
2967
2968 return 0;
2969}
2970
2971/**
2972 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
2973 */
2974static int
2975il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2976{
2977 struct device *dev = &il->pci_dev->dev;
2978 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2979
2980 /* Driver private data, only for Tx (not command) queues,
2981 * not shared with device. */
2982 if (id != il->cmd_queue) {
2983 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX,
2984 sizeof(struct sk_buff *),
2985 GFP_KERNEL);
2986 if (!txq->skbs) {
2987 IL_ERR("Fail to alloc skbs\n");
2988 goto error;
2989 }
2990 } else
2991 txq->skbs = NULL;
2992
2993 /* Circular buffer of transmit frame descriptors (TFDs),
2994 * shared with device */
2995 txq->tfds =
2996 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2997 if (!txq->tfds)
2998 goto error;
2999
3000 txq->q.id = id;
3001
3002 return 0;
3003
3004error:
3005 kfree(txq->skbs);
3006 txq->skbs = NULL;
3007
3008 return -ENOMEM;
3009}
3010
3011/**
3012 * il_tx_queue_init - Allocate and initialize one tx/cmd queue
3013 */
3014int
3015il_tx_queue_init(struct il_priv *il, u32 txq_id)
3016{
3017 int i, len, ret;
3018 int slots, actual_slots;
3019 struct il_tx_queue *txq = &il->txq[txq_id];
3020
3021 /*
3022 * Alloc buffer array for commands (Tx or other types of commands).
3023 * For the command queue (#4/#9), allocate command space + one big
3024 * command for scan, since scan command is very huge; the system will
3025 * not have two scans at the same time, so only one is needed.
3026 * For normal Tx queues (all other queues), no super-size command
3027 * space is needed.
3028 */
3029 if (txq_id == il->cmd_queue) {
3030 slots = TFD_CMD_SLOTS;
3031 actual_slots = slots + 1;
3032 } else {
3033 slots = TFD_TX_CMD_SLOTS;
3034 actual_slots = slots;
3035 }
3036
3037 txq->meta =
3038 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
3039 txq->cmd =
3040 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
3041
3042 if (!txq->meta || !txq->cmd)
3043 goto out_free_arrays;
3044
3045 len = sizeof(struct il_device_cmd);
3046 for (i = 0; i < actual_slots; i++) {
3047 /* only happens for cmd queue */
3048 if (i == slots)
3049 len = IL_MAX_CMD_SIZE;
3050
3051 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
3052 if (!txq->cmd[i])
3053 goto err;
3054 }
3055
3056 /* Alloc driver data array and TFD circular buffer */
3057 ret = il_tx_queue_alloc(il, txq, txq_id);
3058 if (ret)
3059 goto err;
3060
3061 txq->need_update = 0;
3062
3063 /*
3064 * For the default queues 0-3, set up the swq_id
3065 * already -- all others need to get one later
3066 * (if they need one at all).
3067 */
3068 if (txq_id < 4)
3069 il_set_swq_id(txq, txq_id, txq_id);
3070
3071 /* Initialize queue's high/low-water marks, and head/tail idxes */
3072 il_queue_init(il, &txq->q, slots, txq_id);
3073
3074 /* Tell device where to find queue */
3075 il->ops->txq_init(il, txq);
3076
3077 return 0;
3078err:
3079 for (i = 0; i < actual_slots; i++)
3080 kfree(txq->cmd[i]);
3081out_free_arrays:
3082 kfree(txq->meta);
3083 kfree(txq->cmd);
3084
3085 return -ENOMEM;
3086}
3087EXPORT_SYMBOL(il_tx_queue_init);
3088
3089void
3090il_tx_queue_reset(struct il_priv *il, u32 txq_id)
3091{
3092 int slots, actual_slots;
3093 struct il_tx_queue *txq = &il->txq[txq_id];
3094
3095 if (txq_id == il->cmd_queue) {
3096 slots = TFD_CMD_SLOTS;
3097 actual_slots = TFD_CMD_SLOTS + 1;
3098 } else {
3099 slots = TFD_TX_CMD_SLOTS;
3100 actual_slots = TFD_TX_CMD_SLOTS;
3101 }
3102
3103 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
3104 txq->need_update = 0;
3105
3106 /* Initialize queue's high/low-water marks, and head/tail idxes */
3107 il_queue_init(il, &txq->q, slots, txq_id);
3108
3109 /* Tell device where to find queue */
3110 il->ops->txq_init(il, txq);
3111}
3112EXPORT_SYMBOL(il_tx_queue_reset);
3113
3114/*************** HOST COMMAND QUEUE FUNCTIONS *****/
3115
3116/**
3117 * il_enqueue_hcmd - enqueue a uCode command
3118 * @il: device private data pointer
3119 * @cmd: a pointer to the ucode command structure
3120 *
3121 * The function returns < 0 values to indicate that the operation
3122 * failed. On success, it returns the idx (> 0) of the command in the
3123 * command queue.
3124 */
3125int
3126il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
3127{
3128 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3129 struct il_queue *q = &txq->q;
3130 struct il_device_cmd *out_cmd;
3131 struct il_cmd_meta *out_meta;
3132 dma_addr_t phys_addr;
3133 unsigned long flags;
3134 int len;
3135 u32 idx;
3136 u16 fix_size;
3137
3138 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
3139 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
3140
3141 /* If any of the command structures ends up being larger than
3142 * the TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
3143 * we will need to increase the size of the TFD entries.
3144 * Also, check that the command buffer does not exceed the size
3145 * of device_cmd and max_cmd_size. */
3146 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
3147 !(cmd->flags & CMD_SIZE_HUGE));
3148 BUG_ON(fix_size > IL_MAX_CMD_SIZE);
3149
3150 if (il_is_rfkill(il) || il_is_ctkill(il)) {
3151 IL_WARN("Not sending command - %s KILL\n",
3152 il_is_rfkill(il) ? "RF" : "CT");
3153 return -EIO;
3154 }
3155
3156 spin_lock_irqsave(&il->hcmd_lock, flags);
3157
3158 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
3159 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3160
3161 IL_ERR("Restarting adapter due to command queue full\n");
3162 queue_work(il->workqueue, &il->restart);
3163 return -ENOSPC;
3164 }
3165
3166 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
3167 out_cmd = txq->cmd[idx];
3168 out_meta = &txq->meta[idx];
3169
3170 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
3171 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3172 return -ENOSPC;
3173 }
3174
3175 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
3176 out_meta->flags = cmd->flags | CMD_MAPPED;
3177 if (cmd->flags & CMD_WANT_SKB)
3178 out_meta->source = cmd;
3179 if (cmd->flags & CMD_ASYNC)
3180 out_meta->callback = cmd->callback;
3181
3182 out_cmd->hdr.cmd = cmd->id;
3183 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
3184
3185 /* At this point, the out_cmd now has all of the incoming cmd
3186 * information */
3187
3188 out_cmd->hdr.flags = 0;
3189 out_cmd->hdr.sequence =
3190 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
3191 if (cmd->flags & CMD_SIZE_HUGE)
3192 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
3193 len = sizeof(struct il_device_cmd);
3194 if (idx == TFD_CMD_SLOTS)
3195 len = IL_MAX_CMD_SIZE;
3196
3197#ifdef CONFIG_IWLEGACY_DEBUG
3198 switch (out_cmd->hdr.cmd) {
3199 case C_TX_LINK_QUALITY_CMD:
3200 case C_SENSITIVITY:
3201 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
3202 "%d bytes at %d[%d]:%d\n",
3203 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3204 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3205 q->write_ptr, idx, il->cmd_queue);
3206 break;
3207 default:
3208 D_HC("Sending command %s (#%x), seq: 0x%04X, "
3209 "%d bytes at %d[%d]:%d\n",
3210 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3211 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
3212 idx, il->cmd_queue);
3213 }
3214#endif
3215
3216 phys_addr =
3217 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
3218 PCI_DMA_BIDIRECTIONAL);
3219 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
3220 idx = -ENOMEM;
3221 goto out;
3222 }
3223 dma_unmap_addr_set(out_meta, mapping, phys_addr);
3224 dma_unmap_len_set(out_meta, len, fix_size);
3225
3226 txq->need_update = 1;
3227
3228 if (il->ops->txq_update_byte_cnt_tbl)
3229 /* Set up entry in queue's byte count circular buffer */
3230 il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
3231
3232 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
3233 U32_PAD(cmd->len));
3234
3235 /* Increment and update queue's write idx */
3236 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3237 il_txq_update_write_ptr(il, txq);
3238
3239out:
3240 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3241 return idx;
3242}
3243
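/*
 * Illustrative sketch, not part of this file: how the 16-bit
 * hdr.sequence value built above round-trips the command queue number
 * and write index that il_tx_cmd_complete() later recovers with
 * SEQ_TO_QUEUE()/SEQ_TO_IDX().  The exact bit layout (index in bits
 * 0-7, queue in bits 8-12) is assumed here from the macro names; see
 * commands.h for the authoritative definitions.
 */
static inline unsigned short example_pack_seq(unsigned int queue,
					      unsigned int write_ptr)
{
	/* mirrors QUEUE_TO_SEQ(queue) | IDX_TO_SEQ(write_ptr) */
	return (unsigned short)(((queue & 0x1f) << 8) | (write_ptr & 0xff));
}

static inline void example_unpack_seq(unsigned short seq,
				      unsigned int *queue, unsigned int *idx)
{
	*queue = (seq >> 8) & 0x1f;	/* SEQ_TO_QUEUE */
	*idx = seq & 0xff;		/* SEQ_TO_IDX */
}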
3244/**
3245 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
3246 *
3247 * When FW advances 'R' idx, all entries between old and new 'R' idx
3248 * need to be reclaimed. As a result, some free space forms. If there is
3249 * enough free space (> low mark), wake the stack that feeds us.
3250 */
3251static void
3252il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
3253{
3254 struct il_tx_queue *txq = &il->txq[txq_id];
3255 struct il_queue *q = &txq->q;
3256 int nfreed = 0;
3257
3258 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
3259 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
3260 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
3261 q->write_ptr, q->read_ptr);
3262 return;
3263 }
3264
3265 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
3266 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3267
3268 if (nfreed++ > 0) {
3269 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
3270 q->write_ptr, q->read_ptr);
3271 queue_work(il->workqueue, &il->restart);
3272 }
3273
3274 }
3275}
3276
3277/**
3278 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3279 * @rxb: Rx buffer to reclaim
3280 *
3281 * If an Rx buffer has an async callback associated with it, the callback
3282 * will be executed. The attached skb (if present) will only be freed
3283 * if the callback returns 1.
3284 */
3285void
3286il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
3287{
3288 struct il_rx_pkt *pkt = rxb_addr(rxb);
3289 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3290 int txq_id = SEQ_TO_QUEUE(sequence);
3291 int idx = SEQ_TO_IDX(sequence);
3292 int cmd_idx;
3293 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
3294 struct il_device_cmd *cmd;
3295 struct il_cmd_meta *meta;
3296 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3297 unsigned long flags;
3298
3299 /* If a Tx command is being handled and it isn't in the actual
3300 * command queue, then a command routing bug has been introduced
3301 * in the queue management code. */
3302 if (WARN
3303 (txq_id != il->cmd_queue,
3304 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
3305 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
3306 il->txq[il->cmd_queue].q.write_ptr)) {
3307 il_print_hex_error(il, pkt, 32);
3308 return;
3309 }
3310
3311 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
3312 cmd = txq->cmd[cmd_idx];
3313 meta = &txq->meta[cmd_idx];
3314
3315 txq->time_stamp = jiffies;
3316
3317 pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
3318 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
3319
3320 /* Input error checking is done when commands are added to queue. */
3321 if (meta->flags & CMD_WANT_SKB) {
3322 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
3323 rxb->page = NULL;
3324 } else if (meta->callback)
3325 meta->callback(il, cmd, pkt);
3326
3327 spin_lock_irqsave(&il->hcmd_lock, flags);
3328
3329 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
3330
3331 if (!(meta->flags & CMD_ASYNC)) {
3332 clear_bit(S_HCMD_ACTIVE, &il->status);
3333 D_INFO("Clearing HCMD_ACTIVE for command %s\n",
3334 il_get_cmd_string(cmd->hdr.cmd));
3335 wake_up(&il->wait_command_queue);
3336 }
3337
3338 /* Mark as unmapped */
3339 meta->flags = 0;
3340
3341 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3342}
3343EXPORT_SYMBOL(il_tx_cmd_complete);
3344
3345MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
3346MODULE_VERSION(IWLWIFI_VERSION);
3347MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
3348MODULE_LICENSE("GPL");
3349
3350/*
3351 * If bt_coex_active is set to true, uCode will do kill/defer
3352 * every time the priority line is asserted (BT is sending signals on the
3353 * priority line in the PCIx).
3354 * If bt_coex_active is set to false, uCode will ignore the BT activity and
3355 * perform normal operation.
3356 *
3357 * Users might experience transmit issues on some platforms due to a WiFi/BT
3358 * co-existence problem. The possible behavior is being
3359 * able to scan and find all the available APs,
3360 * but not being able to associate with any AP.
3361 * On those platforms, WiFi communication can be restored by setting the
3362 * "bt_coex_active" module parameter to "false".
3363 *
3364 * default: bt_coex_active = true (BT_COEX_ENABLE)
3365 */
3366static bool bt_coex_active = true;
3367module_param(bt_coex_active, bool, S_IRUGO);
3368MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
3369
3370u32 il_debug_level;
3371EXPORT_SYMBOL(il_debug_level);
3372
3373const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3374EXPORT_SYMBOL(il_bcast_addr);
3375
3376#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
3377#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
3378static void
3379il_init_ht_hw_capab(const struct il_priv *il,
3380 struct ieee80211_sta_ht_cap *ht_info,
3381 enum ieee80211_band band)
3382{
3383 u16 max_bit_rate = 0;
3384 u8 rx_chains_num = il->hw_params.rx_chains_num;
3385 u8 tx_chains_num = il->hw_params.tx_chains_num;
3386
3387 ht_info->cap = 0;
3388 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
3389
3390 ht_info->ht_supported = true;
3391
3392 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
3393 max_bit_rate = MAX_BIT_RATE_20_MHZ;
3394 if (il->hw_params.ht40_channel & BIT(band)) {
3395 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
3396 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
3397 ht_info->mcs.rx_mask[4] = 0x01;
3398 max_bit_rate = MAX_BIT_RATE_40_MHZ;
3399 }
3400
3401 if (il->cfg->mod_params->amsdu_size_8K)
3402 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
3403
3404 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3405 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3406
3407 ht_info->mcs.rx_mask[0] = 0xFF;
3408 if (rx_chains_num >= 2)
3409 ht_info->mcs.rx_mask[1] = 0xFF;
3410 if (rx_chains_num >= 3)
3411 ht_info->mcs.rx_mask[2] = 0xFF;
3412
3413 /* Highest supported Rx data rate */
3414 max_bit_rate *= rx_chains_num;
3415 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
3416 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
3417
3418 /* Tx MCS capabilities */
3419 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
3420 if (tx_chains_num != rx_chains_num) {
3421 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
3422 ht_info->mcs.tx_params |=
3423 ((tx_chains_num -
3424 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
3425 }
3426}
3427
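/*
 * Illustrative sketch, not part of this file: the arithmetic behind
 * the rx_highest value filled in by il_init_ht_hw_capab() above.  The
 * per-stream peaks come from the MAX_BIT_RATE_* defines; e.g. two Rx
 * chains on an HT40-capable channel advertise 2 * 150 = 300 Mbps.
 */
static inline unsigned short example_ht_rx_highest(unsigned int rx_chains,
						   int ht40_ok)
{
	unsigned int per_stream = ht40_ok ? 150 : 72;	/* Mbps */

	return (unsigned short)(per_stream * rx_chains);
}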
3428/**
3429 * il_init_geos - Initialize mac80211's geo/channel info based on EEPROM
3430 */
3431int
3432il_init_geos(struct il_priv *il)
3433{
3434 struct il_channel_info *ch;
3435 struct ieee80211_supported_band *sband;
3436 struct ieee80211_channel *channels;
3437 struct ieee80211_channel *geo_ch;
3438 struct ieee80211_rate *rates;
3439 int i = 0;
3440 s8 max_tx_power = 0;
3441
3442 if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
3443 il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
3444 D_INFO("Geography modes already initialized.\n");
3445 set_bit(S_GEO_CONFIGURED, &il->status);
3446 return 0;
3447 }
3448
3449 channels =
3450 kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
3451 GFP_KERNEL);
3452 if (!channels)
3453 return -ENOMEM;
3454
3455 rates =
3456 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
3457 GFP_KERNEL);
3458 if (!rates) {
3459 kfree(channels);
3460 return -ENOMEM;
3461 }
3462
3463 /* 5.2GHz channels start after the 2.4GHz channels */
3464 sband = &il->bands[IEEE80211_BAND_5GHZ];
3465 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
3466 /* just OFDM */
3467 sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
3468 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
3469
3470 if (il->cfg->sku & IL_SKU_N)
3471 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
3472
3473 sband = &il->bands[IEEE80211_BAND_2GHZ];
3474 sband->channels = channels;
3475 /* OFDM & CCK */
3476 sband->bitrates = rates;
3477 sband->n_bitrates = RATE_COUNT_LEGACY;
3478
3479 if (il->cfg->sku & IL_SKU_N)
3480 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
3481
3482 il->ieee_channels = channels;
3483 il->ieee_rates = rates;
3484
3485 for (i = 0; i < il->channel_count; i++) {
3486 ch = &il->channel_info[i];
3487
3488 if (!il_is_channel_valid(ch))
3489 continue;
3490
3491 sband = &il->bands[ch->band];
3492
3493 geo_ch = &sband->channels[sband->n_channels++];
3494
3495 geo_ch->center_freq =
3496 ieee80211_channel_to_frequency(ch->channel, ch->band);
3497 geo_ch->max_power = ch->max_power_avg;
3498 geo_ch->max_antenna_gain = 0xff;
3499 geo_ch->hw_value = ch->channel;
3500
3501 if (il_is_channel_valid(ch)) {
3502 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3503 geo_ch->flags |= IEEE80211_CHAN_NO_IR;
3504
3505 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3506 geo_ch->flags |= IEEE80211_CHAN_NO_IR;
3507
3508 if (ch->flags & EEPROM_CHANNEL_RADAR)
3509 geo_ch->flags |= IEEE80211_CHAN_RADAR;
3510
3511 geo_ch->flags |= ch->ht40_extension_channel;
3512
3513 if (ch->max_power_avg > max_tx_power)
3514 max_tx_power = ch->max_power_avg;
3515 } else {
3516 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
3517 }
3518
3519 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
3520 geo_ch->center_freq,
3521 il_is_channel_a_band(ch) ? "5.2" : "2.4",
3522 geo_ch->
3523 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
3524 geo_ch->flags);
3525 }
3526
3527 il->tx_power_device_lmt = max_tx_power;
3528 il->tx_power_user_lmt = max_tx_power;
3529 il->tx_power_next = max_tx_power;
3530
3531 if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
3532 (il->cfg->sku & IL_SKU_A)) {
3533 IL_INFO("Incorrectly detected BG card as ABG. "
3534 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
3535 il->pci_dev->device, il->pci_dev->subsystem_device);
3536 il->cfg->sku &= ~IL_SKU_A;
3537 }
3538
3539 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
3540 il->bands[IEEE80211_BAND_2GHZ].n_channels,
3541 il->bands[IEEE80211_BAND_5GHZ].n_channels);
3542
3543 set_bit(S_GEO_CONFIGURED, &il->status);
3544
3545 return 0;
3546}
3547EXPORT_SYMBOL(il_init_geos);
3548
3549/*
3550 * il_free_geos - undo allocations in il_init_geos
3551 */
3552void
3553il_free_geos(struct il_priv *il)
3554{
3555 kfree(il->ieee_channels);
3556 kfree(il->ieee_rates);
3557 clear_bit(S_GEO_CONFIGURED, &il->status);
3558}
3559EXPORT_SYMBOL(il_free_geos);
3560
3561static bool
3562il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
3563 u16 channel, u8 extension_chan_offset)
3564{
3565 const struct il_channel_info *ch_info;
3566
3567 ch_info = il_get_channel_info(il, band, channel);
3568 if (!il_is_channel_valid(ch_info))
3569 return false;
3570
3571 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3572 return !(ch_info->
3573 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3574 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3575 return !(ch_info->
3576 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3577
3578 return false;
3579}
3580
3581bool
3582il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
3583{
3584 if (!il->ht.enabled || !il->ht.is_40mhz)
3585 return false;
3586
3587 /*
3588 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
3589 * because the bit will not be set in the pure 40MHz case
3590 */
3591 if (ht_cap && !ht_cap->ht_supported)
3592 return false;
3593
3594#ifdef CONFIG_IWLEGACY_DEBUGFS
3595 if (il->disable_ht40)
3596 return false;
3597#endif
3598
3599 return il_is_channel_extension(il, il->band,
3600 le16_to_cpu(il->staging.channel),
3601 il->ht.extension_chan_offset);
3602}
3603EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3604
3605static u16
3606il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3607{
3608 u16 new_val;
3609 u16 beacon_factor;
3610
3611 /*
3612 * If mac80211 hasn't given us a beacon interval, program
3613 * the default into the device.
3614 */
3615 if (!beacon_val)
3616 return DEFAULT_BEACON_INTERVAL;
3617
3618 /*
3619 * If the beacon interval we obtained from the peer
3620 * is too large, we'll have to wake up more often
3621 * (and in IBSS case, we'll beacon too much)
3622 *
3623 * For example, if max_beacon_val is 4096, and the
3624 * requested beacon interval is 7000, we'll have to
3625 * use 3500 to be able to wake up on the beacons.
3626 *
3627 * This could badly influence beacon detection stats.
3628 */
3629
3630 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3631 new_val = beacon_val / beacon_factor;
3632
3633 if (!new_val)
3634 new_val = max_beacon_val;
3635
3636 return new_val;
3637}
3638
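/*
 * Worked example for il_adjust_beacon_interval() above (illustration
 * only): with max_beacon_val = 4096 TU and a peer interval of 7000 TU,
 * beacon_factor = (7000 + 4096) / 4096 = 2, so the device is
 * programmed with 7000 / 2 = 3500 TU and wakes on every other beacon.
 */
static inline unsigned short example_adjusted_beacon_interval(void)
{
	unsigned short beacon_val = 7000, max_beacon_val = 4096;
	unsigned short factor = (beacon_val + max_beacon_val) / max_beacon_val;

	return beacon_val / factor;	/* 3500 */
}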
3639int
3640il_send_rxon_timing(struct il_priv *il)
3641{
3642 u64 tsf;
3643 s32 interval_tm, rem;
3644 struct ieee80211_conf *conf = NULL;
3645 u16 beacon_int;
3646 struct ieee80211_vif *vif = il->vif;
3647
3648 conf = &il->hw->conf;
3649
3650 lockdep_assert_held(&il->mutex);
3651
3652 memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));
3653
3654 il->timing.timestamp = cpu_to_le64(il->timestamp);
3655 il->timing.listen_interval = cpu_to_le16(conf->listen_interval);
3656
3657 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
3658
3659 /*
3660 * TODO: For IBSS we need to get atim_win from mac80211,
3661 * for now just always use 0
3662 */
3663 il->timing.atim_win = 0;
3664
3665 beacon_int =
3666 il_adjust_beacon_interval(beacon_int,
3667 il->hw_params.max_beacon_itrvl *
3668 TIME_UNIT);
3669 il->timing.beacon_interval = cpu_to_le16(beacon_int);
3670
3671 tsf = il->timestamp; /* tsf is modified by do_div: copy it */
3672 interval_tm = beacon_int * TIME_UNIT;
3673 rem = do_div(tsf, interval_tm);
3674 il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
3675
3676 il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
3677
3678 D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
3679 le16_to_cpu(il->timing.beacon_interval),
3680 le32_to_cpu(il->timing.beacon_init_val),
3681 le16_to_cpu(il->timing.atim_win));
3682
3683 return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
3684 &il->timing);
3685}
3686EXPORT_SYMBOL(il_send_rxon_timing);
3687
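/*
 * Illustrative sketch, not part of this file: the beacon_init_val
 * arithmetic used in il_send_rxon_timing() above, i.e. how many usec
 * remain until the next beacon boundary.  The 1024 usec-per-TU factor
 * mirrors TIME_UNIT; the TSF value below is made up for the example.
 */
static inline unsigned int example_beacon_init_val(void)
{
	unsigned long long tsf = 250000ULL;		/* hypothetical TSF, usec */
	unsigned int beacon_int = 100;			/* TU */
	unsigned int interval_tm = beacon_int * 1024;	/* 102400 usec */
	unsigned int rem = (unsigned int)(tsf % interval_tm);	/* 45200 */

	return interval_tm - rem;			/* 57200 usec to next beacon */
}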
3688void
3689il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
3690{
3691 struct il_rxon_cmd *rxon = &il->staging;
3692
3693 if (hw_decrypt)
3694 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3695 else
3696 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3697
3698}
3699EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3700
3701/* validate RXON structure is valid */
3702int
3703il_check_rxon_cmd(struct il_priv *il)
3704{
3705 struct il_rxon_cmd *rxon = &il->staging;
3706 bool error = false;
3707
3708 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3709 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3710 IL_WARN("check 2.4G: wrong narrow\n");
3711 error = true;
3712 }
3713 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3714 IL_WARN("check 2.4G: wrong radar\n");
3715 error = true;
3716 }
3717 } else {
3718 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3719 IL_WARN("check 5.2G: not short slot!\n");
3720 error = true;
3721 }
3722 if (rxon->flags & RXON_FLG_CCK_MSK) {
3723 IL_WARN("check 5.2G: CCK!\n");
3724 error = true;
3725 }
3726 }
3727 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3728 IL_WARN("mac/bssid mcast!\n");
3729 error = true;
3730 }
3731
3732 /* make sure basic rates 6Mbps and 1Mbps are supported */
3733 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3734 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3735 IL_WARN("neither 1 nor 6 are basic\n");
3736 error = true;
3737 }
3738
3739 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3740 IL_WARN("aid > 2007\n");
3741 error = true;
3742 }
3743
3744 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3745 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3746 IL_WARN("CCK and short slot\n");
3747 error = true;
3748 }
3749
3750 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3751 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3752 IL_WARN("CCK and auto detect");
3753 error = true;
3754 }
3755
3756 if ((rxon->
3757 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3758 RXON_FLG_TGG_PROTECT_MSK) {
3759 IL_WARN("TGg but no auto-detect\n");
3760 error = true;
3761 }
3762
3763 if (error)
3764 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3765
3766 if (error) {
3767 IL_ERR("Invalid RXON\n");
3768 return -EINVAL;
3769 }
3770 return 0;
3771}
3772EXPORT_SYMBOL(il_check_rxon_cmd);
3773
3774/**
3775 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
3776 * @il: staging_rxon is compared to active_rxon
3777 *
3778 * If the RXON structure is changing enough to require a new tune,
3779 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
3780 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
3781 */
3782int
3783il_full_rxon_required(struct il_priv *il)
3784{
3785 const struct il_rxon_cmd *staging = &il->staging;
3786 const struct il_rxon_cmd *active = &il->active;
3787
3788#define CHK(cond) \
3789 if ((cond)) { \
3790 D_INFO("need full RXON - " #cond "\n"); \
3791 return 1; \
3792 }
3793
3794#define CHK_NEQ(c1, c2) \
3795 if ((c1) != (c2)) { \
3796 D_INFO("need full RXON - " \
3797 #c1 " != " #c2 " - %d != %d\n", \
3798 (c1), (c2)); \
3799 return 1; \
3800 }
3801
3802 /* These items are only settable from the full RXON command */
3803 CHK(!il_is_associated(il));
3804 CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr));
3805 CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr));
3806 CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr,
3807 active->wlap_bssid_addr));
3808 CHK_NEQ(staging->dev_type, active->dev_type);
3809 CHK_NEQ(staging->channel, active->channel);
3810 CHK_NEQ(staging->air_propagation, active->air_propagation);
3811 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
3812 active->ofdm_ht_single_stream_basic_rates);
3813 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
3814 active->ofdm_ht_dual_stream_basic_rates);
3815 CHK_NEQ(staging->assoc_id, active->assoc_id);
3816
3817 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
3818 * be updated with the RXON_ASSOC command -- however only some
3819 * flag transitions are allowed using RXON_ASSOC */
3820
3821 /* Check if we are not switching bands */
3822 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
3823 active->flags & RXON_FLG_BAND_24G_MSK);
3824
3825 /* Check if we are switching association toggle */
3826 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
3827 active->filter_flags & RXON_FILTER_ASSOC_MSK);
3828
3829#undef CHK
3830#undef CHK_NEQ
3831
3832 return 0;
3833}
3834EXPORT_SYMBOL(il_full_rxon_required);
3835
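/*
 * Miniature of the CHK/CHK_NEQ pattern above (illustration only):
 * compare a staging copy against the active copy field by field and
 * bail out on the first difference that forces a full retune.
 */
static inline int example_needs_full_update(unsigned short staging_channel,
					    unsigned short active_channel,
					    unsigned char staging_dev_type,
					    unsigned char active_dev_type)
{
	if (staging_channel != active_channel)
		return 1;	/* channel change always needs a full RXON */
	if (staging_dev_type != active_dev_type)
		return 1;	/* so does a device type change */
	return 0;		/* otherwise RXON_ASSOC may be enough */
}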
3836u8
3837il_get_lowest_plcp(struct il_priv *il)
3838{
3839 /*
3840 * Assign the lowest rate -- should really get this from
3841 * the beacon skb from mac80211.
3842 */
3843 if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
3844 return RATE_1M_PLCP;
3845 else
3846 return RATE_6M_PLCP;
3847}
3848EXPORT_SYMBOL(il_get_lowest_plcp);
3849
3850static void
3851_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
3852{
3853 struct il_rxon_cmd *rxon = &il->staging;
3854
3855 if (!il->ht.enabled) {
3856 rxon->flags &=
3857 ~(RXON_FLG_CHANNEL_MODE_MSK |
3858 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
3859 | RXON_FLG_HT_PROT_MSK);
3860 return;
3861 }
3862
3863 rxon->flags |=
3864 cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
3865
3866 /* Set up channel bandwidth:
3867 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
3868 /* clear the HT channel mode before setting the mode */
3869 rxon->flags &=
3870 ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
3871 if (il_is_ht40_tx_allowed(il, NULL)) {
3872 /* pure ht40 */
3873 if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
3874 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
3875 /* Note: control channel is opposite of extension channel */
3876 switch (il->ht.extension_chan_offset) {
3877 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3878 rxon->flags &=
3879 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3880 break;
3881 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3882 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3883 break;
3884 }
3885 } else {
3886 /* Note: control channel is opposite of extension channel */
3887 switch (il->ht.extension_chan_offset) {
3888 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3889 rxon->flags &=
3890 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
3891 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
3892 break;
3893 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3894 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3895 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
3896 break;
3897 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
3898 default:
3899 /* channel location only valid if in Mixed mode */
3900 IL_ERR("invalid extension channel offset\n");
3901 break;
3902 }
3903 }
3904 } else {
3905 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
3906 }
3907
3908 if (il->ops->set_rxon_chain)
3909 il->ops->set_rxon_chain(il);
3910
3911 D_ASSOC("rxon flags 0x%X operation mode :0x%X "
3912 "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
3913 il->ht.protection, il->ht.extension_chan_offset);
3914}
3915
3916void
3917il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
3918{
3919 _il_set_rxon_ht(il, ht_conf);
3920}
3921EXPORT_SYMBOL(il_set_rxon_ht);
3922
3923/* Return a valid, unused channel for a passive scan to reset the RF */
3924u8
3925il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
3926{
3927 const struct il_channel_info *ch_info;
3928 int i;
3929 u8 channel = 0;
3930 u8 min, max;
3931
3932 if (band == IEEE80211_BAND_5GHZ) {
3933 min = 14;
3934 max = il->channel_count;
3935 } else {
3936 min = 0;
3937 max = 14;
3938 }
3939
3940 for (i = min; i < max; i++) {
3941 channel = il->channel_info[i].channel;
3942 if (channel == le16_to_cpu(il->staging.channel))
3943 continue;
3944
3945 ch_info = il_get_channel_info(il, band, channel);
3946 if (il_is_channel_valid(ch_info))
3947 break;
3948 }
3949
3950 return channel;
3951}
3952EXPORT_SYMBOL(il_get_single_channel_number);
3953
3954/**
3955 * il_set_rxon_channel - Set the band and channel values in staging RXON
3956 * @ch: requested channel as a pointer to struct ieee80211_channel
3957 *
3958 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
3959 * in the staging RXON flag structure based on the ch->band
3960 */
3961int
3962il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
3963{
3964 enum ieee80211_band band = ch->band;
3965 u16 channel = ch->hw_value;
3966
3967 if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
3968 return 0;
3969
3970 il->staging.channel = cpu_to_le16(channel);
3971 if (band == IEEE80211_BAND_5GHZ)
3972 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3973 else
3974 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3975
3976 il->band = band;
3977
3978 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3979
3980 return 0;
3981}
3982EXPORT_SYMBOL(il_set_rxon_channel);
3983
3984void
3985il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
3986 struct ieee80211_vif *vif)
3987{
3988 if (band == IEEE80211_BAND_5GHZ) {
3989 il->staging.flags &=
3990 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3991 RXON_FLG_CCK_MSK);
3992 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3993 } else {
3994 /* Copied from il_post_associate() */
3995 if (vif && vif->bss_conf.use_short_slot)
3996 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3997 else
3998 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3999
4000 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
4001 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
4002 il->staging.flags &= ~RXON_FLG_CCK_MSK;
4003 }
4004}
4005EXPORT_SYMBOL(il_set_flags_for_band);
4006
4007/*
4008 * initialize rxon structure with default values from eeprom
4009 */
4010void
4011il_connection_init_rx_config(struct il_priv *il)
4012{
4013 const struct il_channel_info *ch_info;
4014
4015 memset(&il->staging, 0, sizeof(il->staging));
4016
4017 switch (il->iw_mode) {
4018 case NL80211_IFTYPE_UNSPECIFIED:
4019 il->staging.dev_type = RXON_DEV_TYPE_ESS;
4020 break;
4021 case NL80211_IFTYPE_STATION:
4022 il->staging.dev_type = RXON_DEV_TYPE_ESS;
4023 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
4024 break;
4025 case NL80211_IFTYPE_ADHOC:
4026 il->staging.dev_type = RXON_DEV_TYPE_IBSS;
4027 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
4028 il->staging.filter_flags =
4029 RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
4030 break;
4031 default:
4032 IL_ERR("Unsupported interface type %d\n", il->vif->type);
4033 return;
4034 }
4035
4036#if 0
4037 /* TODO: Figure out when short_preamble would be set and cache from
4038 * that */
4039 if (!hw_to_local(il->hw)->short_preamble)
4040 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
4041 else
4042 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
4043#endif
4044
4045 ch_info =
4046 il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));
4047
4048 if (!ch_info)
4049 ch_info = &il->channel_info[0];
4050
4051 il->staging.channel = cpu_to_le16(ch_info->channel);
4052 il->band = ch_info->band;
4053
4054 il_set_flags_for_band(il, il->band, il->vif);
4055
4056 il->staging.ofdm_basic_rates =
4057 (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4058 il->staging.cck_basic_rates =
4059 (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4060
4061 /* clear both MIX and PURE40 mode flag */
4062 il->staging.flags &=
4063 ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
4064 if (il->vif)
4065 memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);
4066
4067 il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
4068 il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
4069}
4070EXPORT_SYMBOL(il_connection_init_rx_config);
4071
4072void
4073il_set_rate(struct il_priv *il)
4074{
4075 const struct ieee80211_supported_band *hw = NULL;
4076 struct ieee80211_rate *rate;
4077 int i;
4078
4079 hw = il_get_hw_mode(il, il->band);
4080 if (!hw) {
4081 IL_ERR("Failed to set rate: unable to get hw mode\n");
4082 return;
4083 }
4084
4085 il->active_rate = 0;
4086
4087 for (i = 0; i < hw->n_bitrates; i++) {
4088 rate = &(hw->bitrates[i]);
4089 if (rate->hw_value < RATE_COUNT_LEGACY)
4090 il->active_rate |= (1 << rate->hw_value);
4091 }
4092
4093 D_RATE("Set active_rate = %0x\n", il->active_rate);
4094
4095 il->staging.cck_basic_rates =
4096 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4097
4098 il->staging.ofdm_basic_rates =
4099 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4100}
4101EXPORT_SYMBOL(il_set_rate);
4102
4103void
4104il_chswitch_done(struct il_priv *il, bool is_success)
4105{
4106 if (test_bit(S_EXIT_PENDING, &il->status))
4107 return;
4108
4109 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4110 ieee80211_chswitch_done(il->vif, is_success);
4111}
4112EXPORT_SYMBOL(il_chswitch_done);
4113
4114void
4115il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
4116{
4117 struct il_rx_pkt *pkt = rxb_addr(rxb);
4118 struct il_csa_notification *csa = &(pkt->u.csa_notif);
4119 struct il_rxon_cmd *rxon = (void *)&il->active;
4120
4121 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4122 return;
4123
4124 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
4125 rxon->channel = csa->channel;
4126 il->staging.channel = csa->channel;
4127 D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
4128 il_chswitch_done(il, true);
4129 } else {
4130 IL_ERR("CSA notif (fail) : channel %d\n",
4131 le16_to_cpu(csa->channel));
4132 il_chswitch_done(il, false);
4133 }
4134}
4135EXPORT_SYMBOL(il_hdl_csa);
4136
4137#ifdef CONFIG_IWLEGACY_DEBUG
4138void
4139il_print_rx_config_cmd(struct il_priv *il)
4140{
4141 struct il_rxon_cmd *rxon = &il->staging;
4142
4143 D_RADIO("RX CONFIG:\n");
4144 il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4145 D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4146 D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4147 D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
4148 D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4149 D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
4150 D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
4151 D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
4152 D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
4153 D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4154}
4155EXPORT_SYMBOL(il_print_rx_config_cmd);
4156#endif
4157/**
4158 * il_irq_handle_error - called for HW or SW error interrupt from card
4159 */
4160void
4161il_irq_handle_error(struct il_priv *il)
4162{
4163 /* Set the FW error flag -- cleared on il_down */
4164 set_bit(S_FW_ERROR, &il->status);
4165
4166 /* Cancel currently queued command. */
4167 clear_bit(S_HCMD_ACTIVE, &il->status);
4168
4169 IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
4170
4171 il->ops->dump_nic_error_log(il);
4172 if (il->ops->dump_fh)
4173 il->ops->dump_fh(il, NULL, false);
4174#ifdef CONFIG_IWLEGACY_DEBUG
4175 if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
4176 il_print_rx_config_cmd(il);
4177#endif
4178
4179 wake_up(&il->wait_command_queue);
4180
4181 /* Keep the restart process from trying to send host
4182 * commands by clearing the INIT status bit */
4183 clear_bit(S_READY, &il->status);
4184
4185 if (!test_bit(S_EXIT_PENDING, &il->status)) {
4186 IL_DBG(IL_DL_FW_ERRORS,
4187 "Restarting adapter due to uCode error.\n");
4188
4189 if (il->cfg->mod_params->restart_fw)
4190 queue_work(il->workqueue, &il->restart);
4191 }
4192}
4193EXPORT_SYMBOL(il_irq_handle_error);
4194
4195static int
4196_il_apm_stop_master(struct il_priv *il)
4197{
4198 int ret = 0;
4199
4200 /* stop device's busmaster DMA activity */
4201 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4202
4203 ret =
4204 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4205 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4206 if (ret < 0)
4207 IL_WARN("Master Disable Timed Out, 100 usec\n");
4208
4209 D_INFO("stop master\n");
4210
4211 return ret;
4212}
4213
4214void
4215_il_apm_stop(struct il_priv *il)
4216{
4217 lockdep_assert_held(&il->reg_lock);
4218
4219 D_INFO("Stop card, put in low power state\n");
4220
4221 /* Stop device's DMA activity */
4222 _il_apm_stop_master(il);
4223
4224 /* Reset the entire device */
4225 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
4226
4227 udelay(10);
4228
4229 /*
4230 * Clear "initialization complete" bit to move adapter from
4231 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
4232 */
4233 _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4234}
4235EXPORT_SYMBOL(_il_apm_stop);
4236
4237void
4238il_apm_stop(struct il_priv *il)
4239{
4240 unsigned long flags;
4241
4242 spin_lock_irqsave(&il->reg_lock, flags);
4243 _il_apm_stop(il);
4244 spin_unlock_irqrestore(&il->reg_lock, flags);
4245}
4246EXPORT_SYMBOL(il_apm_stop);
4247
4248/*
4249 * Start up NIC's basic functionality after it has been reset
4250 * (e.g. after platform boot, or shutdown via il_apm_stop())
4251 * NOTE: This does not load uCode nor start the embedded processor
4252 */
4253int
4254il_apm_init(struct il_priv *il)
4255{
4256 int ret = 0;
4257 u16 lctl;
4258
4259 D_INFO("Init card's basic functions\n");
4260
4261 /*
4262 * Use "set_bit" below rather than "write", to preserve any hardware
4263 * bits already set by default after reset.
4264 */
4265
4266 /* Disable L0S exit timer (platform NMI Work/Around) */
4267 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4268 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4269
4270 /*
4271 * Disable L0s without affecting L1;
4272 * don't wait for ICH L0s (ICH bug W/A)
4273 */
4274 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4275 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
4276
4277 /* Set FH wait threshold to maximum (HW error during stress W/A) */
4278 il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
4279
4280 /*
4281 * Enable HAP INTA (interrupt from management bus) to
4282 * wake device's PCI Express link L1a -> L0s
4283 * NOTE: This is no-op for 3945 (non-existent bit)
4284 */
4285 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
4286 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
4287
4288 /*
4289 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
4290 * Check if BIOS (or OS) enabled L1-ASPM on this device.
4291 * If so (likely), disable L0S, so device moves directly L0->L1;
4292 * costs negligible amount of power savings.
4293 * If not (unlikely), enable L0S, so there is at least some
4294 * power savings, even without L1.
4295 */
4296 if (il->cfg->set_l0s) {
4297 pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
4298 if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
4299 /* L1-ASPM enabled; disable(!) L0S */
4300 il_set_bit(il, CSR_GIO_REG,
4301 CSR_GIO_REG_VAL_L0S_ENABLED);
4302 D_POWER("L1 Enabled; Disabling L0S\n");
4303 } else {
4304 /* L1-ASPM disabled; enable(!) L0S */
4305 il_clear_bit(il, CSR_GIO_REG,
4306 CSR_GIO_REG_VAL_L0S_ENABLED);
4307 D_POWER("L1 Disabled; Enabling L0S\n");
4308 }
4309 }
4310
4311 /* Configure analog phase-lock-loop before activating to D0A */
4312 if (il->cfg->pll_cfg_val)
4313 il_set_bit(il, CSR_ANA_PLL_CFG,
4314 il->cfg->pll_cfg_val);
4315
4316 /*
4317 * Set "initialization complete" bit to move adapter from
4318 * D0U* --> D0A* (powered-up active) state.
4319 */
4320 il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4321
4322 /*
4323 * Wait for clock stabilization; once stabilized, access to
4324 * device-internal resources is supported, e.g. il_wr_prph()
4325 * and accesses to uCode SRAM.
4326 */
4327 ret =
4328 _il_poll_bit(il, CSR_GP_CNTRL,
4329 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
4330 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
4331 if (ret < 0) {
4332 D_INFO("Failed to init the card\n");
4333 goto out;
4334 }
4335
4336 /*
4337 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
4338 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
4339 *
4340 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
4341 * do not disable clocks. This preserves any hardware bits already
4342 * set by default in "CLK_CTRL_REG" after reset.
4343 */
4344 if (il->cfg->use_bsm)
4345 il_wr_prph(il, APMG_CLK_EN_REG,
4346 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
4347 else
4348 il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
4349 udelay(20);
4350
4351 /* Disable L1-Active */
4352 il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
4353 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
4354
4355out:
4356 return ret;
4357}
4358EXPORT_SYMBOL(il_apm_init);
4359
4360int
4361il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
4362{
4363 int ret;
4364 s8 prev_tx_power;
4365 bool defer;
4366
4367 lockdep_assert_held(&il->mutex);
4368
4369 if (il->tx_power_user_lmt == tx_power && !force)
4370 return 0;
4371
4372 if (!il->ops->send_tx_power)
4373 return -EOPNOTSUPP;
4374
4375 /* 0 dBm means 1 milliwatt */
4376 if (tx_power < 0) {
4377 IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
4378 return -EINVAL;
4379 }
4380
4381 if (tx_power > il->tx_power_device_lmt) {
4382 IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
4383 tx_power, il->tx_power_device_lmt);
4384 return -EINVAL;
4385 }
4386
4387 if (!il_is_ready_rf(il))
4388 return -EIO;
4389
4390 /* scan complete and commit_rxon use the tx_power_next value,
4391 * it always needs to be updated with the newest request */
4392 il->tx_power_next = tx_power;
4393
4394 /* do not set tx power when scanning or channel changing */
4395 defer = test_bit(S_SCANNING, &il->status) ||
4396 memcmp(&il->active, &il->staging, sizeof(il->staging));
4397 if (defer && !force) {
4398 D_INFO("Deferring tx power set\n");
4399 return 0;
4400 }
4401
4402 prev_tx_power = il->tx_power_user_lmt;
4403 il->tx_power_user_lmt = tx_power;
4404
4405 ret = il->ops->send_tx_power(il);
4406
4407 /* if setting tx_power fails, restore the original tx power */
4408 if (ret) {
4409 il->tx_power_user_lmt = prev_tx_power;
4410 il->tx_power_next = prev_tx_power;
4411 }
4412 return ret;
4413}
4414EXPORT_SYMBOL(il_set_tx_power);
4415
4416void
4417il_send_bt_config(struct il_priv *il)
4418{
4419 struct il_bt_cmd bt_cmd = {
4420 .lead_time = BT_LEAD_TIME_DEF,
4421 .max_kill = BT_MAX_KILL_DEF,
4422 .kill_ack_mask = 0,
4423 .kill_cts_mask = 0,
4424 };
4425
4426 if (!bt_coex_active)
4427 bt_cmd.flags = BT_COEX_DISABLE;
4428 else
4429 bt_cmd.flags = BT_COEX_ENABLE;
4430
4431 D_INFO("BT coex %s\n",
4432 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4433
4434 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4435 IL_ERR("failed to send BT Coex Config\n");
4436}
4437EXPORT_SYMBOL(il_send_bt_config);
4438
4439int
4440il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4441{
4442 struct il_stats_cmd stats_cmd = {
4443 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4444 };
4445
4446 if (flags & CMD_ASYNC)
4447 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4448 &stats_cmd, NULL);
4449 else
4450 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4451 &stats_cmd);
4452}
4453EXPORT_SYMBOL(il_send_stats_request);
4454
4455void
4456il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
4457{
4458#ifdef CONFIG_IWLEGACY_DEBUG
4459 struct il_rx_pkt *pkt = rxb_addr(rxb);
4460 struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
4461 D_RX("sleep mode: %d, src: %d\n",
4462 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
4463#endif
4464}
4465EXPORT_SYMBOL(il_hdl_pm_sleep);
4466
4467void
4468il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4469{
4470 struct il_rx_pkt *pkt = rxb_addr(rxb);
4471 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4472 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
4473 il_get_cmd_string(pkt->hdr.cmd));
4474 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4475}
4476EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4477
4478void
4479il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
4480{
4481 struct il_rx_pkt *pkt = rxb_addr(rxb);
4482
4483 IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
4484 "seq 0x%04X ser 0x%08X\n",
4485 le32_to_cpu(pkt->u.err_resp.error_type),
4486 il_get_cmd_string(pkt->u.err_resp.cmd_id),
4487 pkt->u.err_resp.cmd_id,
4488 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
4489 le32_to_cpu(pkt->u.err_resp.error_info));
4490}
4491EXPORT_SYMBOL(il_hdl_error);
4492
4493void
4494il_clear_isr_stats(struct il_priv *il)
4495{
4496 memset(&il->isr_stats, 0, sizeof(il->isr_stats));
4497}
4498
4499int
4500il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
4501 const struct ieee80211_tx_queue_params *params)
4502{
4503 struct il_priv *il = hw->priv;
4504 unsigned long flags;
4505 int q;
4506
4507 D_MAC80211("enter\n");
4508
4509 if (!il_is_ready_rf(il)) {
4510 D_MAC80211("leave - RF not ready\n");
4511 return -EIO;
4512 }
4513
4514 if (queue >= AC_NUM) {
4515 D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
4516 return 0;
4517 }
4518
4519 q = AC_NUM - 1 - queue;
4520
4521 spin_lock_irqsave(&il->lock, flags);
4522
4523 il->qos_data.def_qos_parm.ac[q].cw_min =
4524 cpu_to_le16(params->cw_min);
4525 il->qos_data.def_qos_parm.ac[q].cw_max =
4526 cpu_to_le16(params->cw_max);
4527 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
4528 il->qos_data.def_qos_parm.ac[q].edca_txop =
4529 cpu_to_le16((params->txop * 32));
4530
4531 il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
4532
4533 spin_unlock_irqrestore(&il->lock, flags);
4534
4535 D_MAC80211("leave\n");
4536 return 0;
4537}
4538EXPORT_SYMBOL(il_mac_conf_tx);
4539
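/*
 * Illustrative sketch, not part of this file: the two conversions in
 * il_mac_conf_tx() above.  The firmware AC array is indexed in the
 * opposite order of mac80211's queue numbering (hence AC_NUM - 1 -
 * queue), and mac80211 passes the TXOP in units of 32 usec, so the
 * driver multiplies by 32 to get microseconds for edca_txop.
 */
static inline void example_conf_tx_conversions(unsigned int mac_queue,
					       unsigned int txop_32usec,
					       unsigned int *fw_ac,
					       unsigned int *txop_usec)
{
	*fw_ac = 4 - 1 - mac_queue;	/* AC_NUM is 4 */
	*txop_usec = txop_32usec * 32;
}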
4540int
4541il_mac_tx_last_beacon(struct ieee80211_hw *hw)
4542{
4543 struct il_priv *il = hw->priv;
4544 int ret;
4545
4546 D_MAC80211("enter\n");
4547
4548 ret = (il->ibss_manager == IL_IBSS_MANAGER);
4549
4550 D_MAC80211("leave ret %d\n", ret);
4551 return ret;
4552}
4553EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4554
4555static int
4556il_set_mode(struct il_priv *il)
4557{
4558 il_connection_init_rx_config(il);
4559
4560 if (il->ops->set_rxon_chain)
4561 il->ops->set_rxon_chain(il);
4562
4563 return il_commit_rxon(il);
4564}
4565
4566int
4567il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4568{
4569 struct il_priv *il = hw->priv;
4570 int err;
4571 bool reset;
4572
4573 mutex_lock(&il->mutex);
4574 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4575
4576 if (!il_is_ready_rf(il)) {
4577 IL_WARN("Try to add interface when device not ready\n");
4578 err = -EINVAL;
4579 goto out;
4580 }
4581
4582 /*
4583 * We do not support multiple virtual interfaces, but on hardware reset
4584 * we have to add the same interface again.
4585 */
4586 reset = (il->vif == vif);
4587 if (il->vif && !reset) {
4588 err = -EOPNOTSUPP;
4589 goto out;
4590 }
4591
4592 il->vif = vif;
4593 il->iw_mode = vif->type;
4594
4595 err = il_set_mode(il);
4596 if (err) {
4597 IL_WARN("Fail to set mode %d\n", vif->type);
4598 if (!reset) {
4599 il->vif = NULL;
4600 il->iw_mode = NL80211_IFTYPE_STATION;
4601 }
4602 }
4603
4604out:
4605 D_MAC80211("leave err %d\n", err);
4606 mutex_unlock(&il->mutex);
4607
4608 return err;
4609}
4610EXPORT_SYMBOL(il_mac_add_interface);
4611
4612static void
4613il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
4614{
4615 lockdep_assert_held(&il->mutex);
4616
4617 if (il->scan_vif == vif) {
4618 il_scan_cancel_timeout(il, 200);
4619 il_force_scan_end(il);
4620 }
4621
4622 il_set_mode(il);
4623}
4624
4625void
4626il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4627{
4628 struct il_priv *il = hw->priv;
4629
4630 mutex_lock(&il->mutex);
4631 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4632
4633 WARN_ON(il->vif != vif);
4634 il->vif = NULL;
4635 il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
4636 il_teardown_interface(il, vif);
4637 eth_zero_addr(il->bssid);
4638
4639 D_MAC80211("leave\n");
4640 mutex_unlock(&il->mutex);
4641}
4642EXPORT_SYMBOL(il_mac_remove_interface);
4643
4644int
4645il_alloc_txq_mem(struct il_priv *il)
4646{
4647 if (!il->txq)
4648 il->txq =
4649 kzalloc(sizeof(struct il_tx_queue) *
4650 il->cfg->num_of_queues, GFP_KERNEL);
4651 if (!il->txq) {
4652 IL_ERR("Not enough memory for txq\n");
4653 return -ENOMEM;
4654 }
4655 return 0;
4656}
4657EXPORT_SYMBOL(il_alloc_txq_mem);
4658
4659void
4660il_free_txq_mem(struct il_priv *il)
4661{
4662 kfree(il->txq);
4663 il->txq = NULL;
4664}
4665EXPORT_SYMBOL(il_free_txq_mem);
4666
4667int
4668il_force_reset(struct il_priv *il, bool external)
4669{
4670 struct il_force_reset *force_reset;
4671
4672 if (test_bit(S_EXIT_PENDING, &il->status))
4673 return -EINVAL;
4674
4675 force_reset = &il->force_reset;
4676 force_reset->reset_request_count++;
4677 if (!external) {
4678 if (force_reset->last_force_reset_jiffies &&
4679 time_after(force_reset->last_force_reset_jiffies +
4680 force_reset->reset_duration, jiffies)) {
4681 D_INFO("force reset rejected\n");
4682 force_reset->reset_reject_count++;
4683 return -EAGAIN;
4684 }
4685 }
4686 force_reset->reset_success_count++;
4687 force_reset->last_force_reset_jiffies = jiffies;
4688
4689 /*
4690 * If the request is external (e.g. from debugfs), then always
4691 * perform the request regardless of the module parameter
4692 * setting.
4693 * If the request is internal (uCode error or driver-detected
4694 * failure), then the fw_restart module parameter needs to be
4695 * checked before performing the firmware reload.
4696 */
4697
4698 if (!external && !il->cfg->mod_params->restart_fw) {
4699 D_INFO("Cancel firmware reload based on "
4700 "module parameter setting\n");
4701 return 0;
4702 }
4703
4704 IL_ERR("On demand firmware reload\n");
4705
4706 /* Set the FW error flag -- cleared on il_down */
4707 set_bit(S_FW_ERROR, &il->status);
4708 wake_up(&il->wait_command_queue);
4709 /*
4710 * Keep the restart process from trying to send host
4711 * commands by clearing the INIT status bit
4712 */
4713 clear_bit(S_READY, &il->status);
4714 queue_work(il->workqueue, &il->restart);
4715
4716 return 0;
4717}
4718EXPORT_SYMBOL(il_force_reset);
4719
4720int
4721il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4722 enum nl80211_iftype newtype, bool newp2p)
4723{
4724 struct il_priv *il = hw->priv;
4725 int err;
4726
4727 mutex_lock(&il->mutex);
4728 D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
4729 vif->type, vif->addr, newtype, newp2p);
4730
4731 if (newp2p) {
4732 err = -EOPNOTSUPP;
4733 goto out;
4734 }
4735
4736 if (!il->vif || !il_is_ready_rf(il)) {
4737 /*
4738 * Huh? But wait ... this can maybe happen when
4739 * we're in the middle of a firmware restart!
4740 */
4741 err = -EBUSY;
4742 goto out;
4743 }
4744
4745 /* success */
4746 vif->type = newtype;
4747 vif->p2p = false;
4748 il->iw_mode = newtype;
4749 il_teardown_interface(il, vif);
4750 err = 0;
4751
4752out:
4753 D_MAC80211("leave err %d\n", err);
4754 mutex_unlock(&il->mutex);
4755
4756 return err;
4757}
4758EXPORT_SYMBOL(il_mac_change_interface);
4759
4760void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4761 u32 queues, bool drop)
4762{
4763 struct il_priv *il = hw->priv;
4764 unsigned long timeout = jiffies + msecs_to_jiffies(500);
4765 int i;
4766
4767 mutex_lock(&il->mutex);
4768 D_MAC80211("enter\n");
4769
4770 if (il->txq == NULL)
4771 goto out;
4772
4773 for (i = 0; i < il->hw_params.max_txq_num; i++) {
4774 struct il_queue *q;
4775
4776 if (i == il->cmd_queue)
4777 continue;
4778
4779 q = &il->txq[i].q;
4780 if (q->read_ptr == q->write_ptr)
4781 continue;
4782
4783 if (time_after(jiffies, timeout)) {
4784 IL_ERR("Failed to flush queue %d\n", q->id);
4785 break;
4786 }
4787
4788 msleep(20);
4789 }
4790out:
4791 D_MAC80211("leave\n");
4792 mutex_unlock(&il->mutex);
4793}
4794EXPORT_SYMBOL(il_mac_flush);
4795
4796/*
4797 * On every watchdog tick we check (latest) time stamp. If it does not
4798 * change during the timeout period and the queue is not empty, we reset the firmware.
4799 */
4800static int
4801il_check_stuck_queue(struct il_priv *il, int cnt)
4802{
4803 struct il_tx_queue *txq = &il->txq[cnt];
4804 struct il_queue *q = &txq->q;
4805 unsigned long timeout;
4806 unsigned long now = jiffies;
4807 int ret;
4808
4809 if (q->read_ptr == q->write_ptr) {
4810 txq->time_stamp = now;
4811 return 0;
4812 }
4813
4814 timeout =
4815 txq->time_stamp +
4816 msecs_to_jiffies(il->cfg->wd_timeout);
4817
4818 if (time_after(now, timeout)) {
4819 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4820 jiffies_to_msecs(now - txq->time_stamp));
4821 ret = il_force_reset(il, false);
4822 return (ret == -EAGAIN) ? 0 : 1;
4823 }
4824
4825 return 0;
4826}
4827
4828/*
4829 * Making the watchdog tick a quarter of the timeout ensures we will
4830 * discover a hung queue between timeout and 1.25*timeout
4831 */
4832#define IL_WD_TICK(timeout) ((timeout) / 4)
4833
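/*
 * Worked example for IL_WD_TICK() (illustration only): with a 2000 ms
 * wd_timeout the watchdog runs every 500 ms, so a queue that hangs
 * right after one check is declared stuck between 2000 ms and
 * 2000 + 500 = 2500 ms later, i.e. between timeout and 1.25 * timeout.
 */
static inline unsigned int example_wd_tick_ms(void)
{
	unsigned int wd_timeout_ms = 2000;

	return IL_WD_TICK(wd_timeout_ms);	/* 500 ms */
}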
4834/*
4835 * Watchdog timer callback: we check each tx queue for being stuck; if hung,
4836 * we reset the firmware. If everything is fine, just rearm the timer.
4837 */
4838void
4839il_bg_watchdog(unsigned long data)
4840{
4841 struct il_priv *il = (struct il_priv *)data;
4842 int cnt;
4843 unsigned long timeout;
4844
4845 if (test_bit(S_EXIT_PENDING, &il->status))
4846 return;
4847
4848 timeout = il->cfg->wd_timeout;
4849 if (timeout == 0)
4850 return;
4851
4852 /* monitor and check for stuck cmd queue */
4853 if (il_check_stuck_queue(il, il->cmd_queue))
4854 return;
4855
4856 /* monitor and check for other stuck queues */
4857 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4858 /* skip as we already checked the command queue */
4859 if (cnt == il->cmd_queue)
4860 continue;
4861 if (il_check_stuck_queue(il, cnt))
4862 return;
4863 }
4864
4865 mod_timer(&il->watchdog,
4866 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4867}
4868EXPORT_SYMBOL(il_bg_watchdog);
4869
4870void
4871il_setup_watchdog(struct il_priv *il)
4872{
4873 unsigned int timeout = il->cfg->wd_timeout;
4874
4875 if (timeout)
4876 mod_timer(&il->watchdog,
4877 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4878 else
4879 del_timer(&il->watchdog);
4880}
4881EXPORT_SYMBOL(il_setup_watchdog);
4882
4883/*
4884 * extended beacon time format
4885 * time in usec will be changed into a 32-bit value in extended:internal format:
4886 * the extended part is the beacon count,
4887 * the internal part is the time in usec within one beacon interval
4888 */
4889u32
4890il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4891{
4892 u32 quot;
4893 u32 rem;
4894 u32 interval = beacon_interval * TIME_UNIT;
4895
4896 if (!interval || !usec)
4897 return 0;
4898
4899 quot =
4900 (usec /
4901 interval) & (il_beacon_time_mask_high(il,
4902 il->hw_params.
4903 beacon_time_tsf_bits) >> il->
4904 hw_params.beacon_time_tsf_bits);
4905 rem =
4906 (usec % interval) & il_beacon_time_mask_low(il,
4907 il->hw_params.
4908 beacon_time_tsf_bits);
4909
4910 return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
4911}
4912EXPORT_SYMBOL(il_usecs_to_beacons);
4913
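/*
 * Illustrative sketch, not part of this file: the extended:internal
 * split performed by il_usecs_to_beacons() above, with the masking
 * omitted.  The number of low ("internal") bits is hardware specific
 * (il->hw_params.beacon_time_tsf_bits); 22 below is only an example
 * value, and 1024 usec per TU mirrors TIME_UNIT.
 */
static inline unsigned int example_usecs_to_beacons(unsigned int usec,
						    unsigned int beacon_interval)
{
	unsigned int tsf_bits = 22;			/* example value only */
	unsigned int interval = beacon_interval * 1024;	/* usec per beacon */
	unsigned int quot, rem;

	if (!interval)
		return 0;

	quot = usec / interval;				/* whole beacons elapsed */
	rem = usec % interval;				/* usec inside the interval */

	return (quot << tsf_bits) | rem;
}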
4914/* base is usually what we get from ucode with each received frame,
4915 * the same as HW timer counter counting down
4916 */
4917__le32
4918il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
4919 u32 beacon_interval)
4920{
4921 u32 base_low = base & il_beacon_time_mask_low(il,
4922 il->hw_params.
4923 beacon_time_tsf_bits);
4924 u32 addon_low = addon & il_beacon_time_mask_low(il,
4925 il->hw_params.
4926 beacon_time_tsf_bits);
4927 u32 interval = beacon_interval * TIME_UNIT;
4928 u32 res = (base & il_beacon_time_mask_high(il,
4929 il->hw_params.
4930 beacon_time_tsf_bits)) +
4931 (addon & il_beacon_time_mask_high(il,
4932 il->hw_params.
4933 beacon_time_tsf_bits));
4934
4935 if (base_low > addon_low)
4936 res += base_low - addon_low;
4937 else if (base_low < addon_low) {
4938 res += interval + base_low - addon_low;
4939 res += (1 << il->hw_params.beacon_time_tsf_bits);
4940 } else
4941 res += (1 << il->hw_params.beacon_time_tsf_bits);
4942
4943 return cpu_to_le32(res);
4944}
4945EXPORT_SYMBOL(il_add_beacon_time);
4946
4947#ifdef CONFIG_PM_SLEEP
4948
4949static int
4950il_pci_suspend(struct device *device)
4951{
4952 struct pci_dev *pdev = to_pci_dev(device);
4953 struct il_priv *il = pci_get_drvdata(pdev);
4954
4955 /*
4956 * This function is called when the system goes into suspend state.
4957 * mac80211 will call il_mac_stop() from the mac80211 suspend function
4958 * first, but since il_mac_stop() has no knowledge of who the caller is,
4959 * it will not call apm_ops.stop() to stop the DMA operation.
4960 * Call apm_ops.stop here to make sure we stop the DMA.
4961 */
4962 il_apm_stop(il);
4963
4964 return 0;
4965}
4966
4967static int
4968il_pci_resume(struct device *device)
4969{
4970 struct pci_dev *pdev = to_pci_dev(device);
4971 struct il_priv *il = pci_get_drvdata(pdev);
4972 bool hw_rfkill = false;
4973
4974 /*
4975 * We disable the RETRY_TIMEOUT register (0x41) to keep
4976 * PCI Tx retries from interfering with C3 CPU state.
4977 */
4978 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
4979
4980 il_enable_interrupts(il);
4981
4982 if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4983 hw_rfkill = true;
4984
4985 if (hw_rfkill)
4986 set_bit(S_RFKILL, &il->status);
4987 else
4988 clear_bit(S_RFKILL, &il->status);
4989
4990 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
4991
4992 return 0;
4993}
4994
4995SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
4996EXPORT_SYMBOL(il_pm_ops);
4997
4998#endif /* CONFIG_PM_SLEEP */
4999
5000static void
5001il_update_qos(struct il_priv *il)
5002{
5003 if (test_bit(S_EXIT_PENDING, &il->status))
5004 return;
5005
5006 il->qos_data.def_qos_parm.qos_flags = 0;
5007
5008 if (il->qos_data.qos_active)
5009 il->qos_data.def_qos_parm.qos_flags |=
5010 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
5011
5012 if (il->ht.enabled)
5013 il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
5014
5015 D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
5016 il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);
5017
5018 il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
5019 &il->qos_data.def_qos_parm, NULL);
5020}
5021
5022/**
5023 * il_mac_config - mac80211 config callback
5024 */
5025int
5026il_mac_config(struct ieee80211_hw *hw, u32 changed)
5027{
5028 struct il_priv *il = hw->priv;
5029 const struct il_channel_info *ch_info;
5030 struct ieee80211_conf *conf = &hw->conf;
5031 struct ieee80211_channel *channel = conf->chandef.chan;
5032 struct il_ht_config *ht_conf = &il->current_ht_config;
5033 unsigned long flags = 0;
5034 int ret = 0;
5035 u16 ch;
5036 int scan_active = 0;
5037 bool ht_changed = false;
5038
5039 mutex_lock(&il->mutex);
5040 D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
5041 changed);
5042
5043 if (unlikely(test_bit(S_SCANNING, &il->status))) {
5044 scan_active = 1;
5045 D_MAC80211("scan active\n");
5046 }
5047
5048 if (changed &
5049 (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
5050 /* mac80211 uses static for non-HT which is what we want */
5051 il->current_ht_config.smps = conf->smps_mode;
5052
5053 /*
5054 * Recalculate chain counts.
5055 *
5056 * If monitor mode is enabled then mac80211 will
5057 * set up the SM PS mode to OFF if an HT channel is
5058 * configured.
5059 */
5060 if (il->ops->set_rxon_chain)
5061 il->ops->set_rxon_chain(il);
5062 }
5063
5064 /* during scanning mac80211 will delay channel setting until
5065 * the scan finishes, with changed = 0
5066 */
5067 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
5068
5069 if (scan_active)
5070 goto set_ch_out;
5071
5072 ch = channel->hw_value;
5073 ch_info = il_get_channel_info(il, channel->band, ch);
5074 if (!il_is_channel_valid(ch_info)) {
5075 D_MAC80211("leave - invalid channel\n");
5076 ret = -EINVAL;
5077 goto set_ch_out;
5078 }
5079
5080 if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
5081 !il_is_channel_ibss(ch_info)) {
5082 D_MAC80211("leave - not IBSS channel\n");
5083 ret = -EINVAL;
5084 goto set_ch_out;
5085 }
5086
5087 spin_lock_irqsave(&il->lock, flags);
5088
5089 /* Configure HT40 channels */
5090 if (il->ht.enabled != conf_is_ht(conf)) {
5091 il->ht.enabled = conf_is_ht(conf);
5092 ht_changed = true;
5093 }
5094 if (il->ht.enabled) {
5095 if (conf_is_ht40_minus(conf)) {
5096 il->ht.extension_chan_offset =
5097 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
5098 il->ht.is_40mhz = true;
5099 } else if (conf_is_ht40_plus(conf)) {
5100 il->ht.extension_chan_offset =
5101 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
5102 il->ht.is_40mhz = true;
5103 } else {
5104 il->ht.extension_chan_offset =
5105 IEEE80211_HT_PARAM_CHA_SEC_NONE;
5106 il->ht.is_40mhz = false;
5107 }
5108 } else
5109 il->ht.is_40mhz = false;
5110
5111 /*
5112 * Default to no protection. Protection mode will
5113 * later be set from BSS config in il_ht_conf
5114 */
5115 il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
5116
5117 /* if we are switching from ht to 2.4 clear flags
5118 * from any ht related info since 2.4 does not
5119 * support ht */
5120 if ((le16_to_cpu(il->staging.channel) != ch))
5121 il->staging.flags = 0;
5122
5123 il_set_rxon_channel(il, channel);
5124 il_set_rxon_ht(il, ht_conf);
5125
5126 il_set_flags_for_band(il, channel->band, il->vif);
5127
5128 spin_unlock_irqrestore(&il->lock, flags);
5129
5130 if (il->ops->update_bcast_stations)
5131 ret = il->ops->update_bcast_stations(il);
5132
5133set_ch_out:
5134 /* The list of supported rates and rate mask can be different
5135 * for each band; since the band may have changed, reset
5136 * the rate mask to what mac80211 lists */
5137 il_set_rate(il);
5138 }
5139
5140 if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
5141 il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
5142 ret = il_power_update_mode(il, false);
5143 if (ret)
5144 D_MAC80211("Error setting sleep level\n");
5145 }
5146
5147 if (changed & IEEE80211_CONF_CHANGE_POWER) {
5148 D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
5149 conf->power_level);
5150
5151 il_set_tx_power(il, conf->power_level, false);
5152 }
5153
5154 if (!il_is_ready(il)) {
5155 D_MAC80211("leave - not ready\n");
5156 goto out;
5157 }
5158
5159 if (scan_active)
5160 goto out;
5161
5162 if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
5163 il_commit_rxon(il);
5164 else
5165 D_INFO("Not re-sending same RXON configuration.\n");
5166 if (ht_changed)
5167 il_update_qos(il);
5168
5169out:
5170 D_MAC80211("leave ret %d\n", ret);
5171 mutex_unlock(&il->mutex);
5172
5173 return ret;
5174}
5175EXPORT_SYMBOL(il_mac_config);
5176
5177void
5178il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5179{
5180 struct il_priv *il = hw->priv;
5181 unsigned long flags;
5182
5183 mutex_lock(&il->mutex);
5184 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
5185
5186 spin_lock_irqsave(&il->lock, flags);
5187
5188 memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
5189
5190 /* new association get rid of ibss beacon skb */
5191 if (il->beacon_skb)
5192 dev_kfree_skb(il->beacon_skb);
5193 il->beacon_skb = NULL;
5194 il->timestamp = 0;
5195
5196 spin_unlock_irqrestore(&il->lock, flags);
5197
5198 il_scan_cancel_timeout(il, 100);
5199 if (!il_is_ready_rf(il)) {
5200 D_MAC80211("leave - not ready\n");
5201 mutex_unlock(&il->mutex);
5202 return;
5203 }
5204
5205 /* we are restarting association process */
5206 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5207 il_commit_rxon(il);
5208
5209 il_set_rate(il);
5210
5211 D_MAC80211("leave\n");
5212 mutex_unlock(&il->mutex);
5213}
5214EXPORT_SYMBOL(il_mac_reset_tsf);
5215
5216static void
5217il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
5218{
5219 struct il_ht_config *ht_conf = &il->current_ht_config;
5220 struct ieee80211_sta *sta;
5221 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
5222
5223 D_ASSOC("enter:\n");
5224
5225 if (!il->ht.enabled)
5226 return;
5227
5228 il->ht.protection =
5229 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
5230 il->ht.non_gf_sta_present =
5231 !!(bss_conf->ht_operation_mode &
5232 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5233
5234 ht_conf->single_chain_sufficient = false;
5235
5236 switch (vif->type) {
5237 case NL80211_IFTYPE_STATION:
5238 rcu_read_lock();
5239 sta = ieee80211_find_sta(vif, bss_conf->bssid);
5240 if (sta) {
5241 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
5242 int maxstreams;
5243
5244 maxstreams =
5245 (ht_cap->mcs.tx_params &
5246 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) >>
5247 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
5248 maxstreams += 1;
5249
5250 if (ht_cap->mcs.rx_mask[1] == 0 &&
5251 ht_cap->mcs.rx_mask[2] == 0)
5252 ht_conf->single_chain_sufficient = true;
5253 if (maxstreams <= 1)
5254 ht_conf->single_chain_sufficient = true;
5255 } else {
5256 /*
5257 * If at all, this can only happen through a race
5258 * when the AP disconnects us while we're still
5259 * setting up the connection, in that case mac80211
5260 * will soon tell us about that.
5261 */
5262 ht_conf->single_chain_sufficient = true;
5263 }
5264 rcu_read_unlock();
5265 break;
5266 case NL80211_IFTYPE_ADHOC:
5267 ht_conf->single_chain_sufficient = true;
5268 break;
5269 default:
5270 break;
5271 }
5272
5273 D_ASSOC("leave\n");
5274}
5275
5276static inline void
5277il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
5278{
5279 /*
5280 * inform the ucode that there is no longer an
5281 * association and that no more packets should be
5282 * sent
5283 */
5284 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5285 il->staging.assoc_id = 0;
5286 il_commit_rxon(il);
5287}
5288
5289static void
5290il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5291{
5292 struct il_priv *il = hw->priv;
5293 unsigned long flags;
5294 __le64 timestamp;
5295 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
5296
5297 if (!skb)
5298 return;
5299
5300 D_MAC80211("enter\n");
5301
5302 lockdep_assert_held(&il->mutex);
5303
5304 if (!il->beacon_enabled) {
5305 IL_ERR("update beacon with no beaconing enabled\n");
5306 dev_kfree_skb(skb);
5307 return;
5308 }
5309
5310 spin_lock_irqsave(&il->lock, flags);
5311
5312 if (il->beacon_skb)
5313 dev_kfree_skb(il->beacon_skb);
5314
5315 il->beacon_skb = skb;
5316
5317 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
5318 il->timestamp = le64_to_cpu(timestamp);
5319
5320 D_MAC80211("leave\n");
5321 spin_unlock_irqrestore(&il->lock, flags);
5322
5323 if (!il_is_ready_rf(il)) {
5324 D_MAC80211("leave - RF not ready\n");
5325 return;
5326 }
5327
5328 il->ops->post_associate(il);
5329}
5330
5331void
5332il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5333 struct ieee80211_bss_conf *bss_conf, u32 changes)
5334{
5335 struct il_priv *il = hw->priv;
5336 int ret;
5337
5338 mutex_lock(&il->mutex);
5339 D_MAC80211("enter: changes 0x%x\n", changes);
5340
5341 if (!il_is_alive(il)) {
5342 D_MAC80211("leave - not alive\n");
5343 mutex_unlock(&il->mutex);
5344 return;
5345 }
5346
5347 if (changes & BSS_CHANGED_QOS) {
5348 unsigned long flags;
5349
5350 spin_lock_irqsave(&il->lock, flags);
5351 il->qos_data.qos_active = bss_conf->qos;
5352 il_update_qos(il);
5353 spin_unlock_irqrestore(&il->lock, flags);
5354 }
5355
5356 if (changes & BSS_CHANGED_BEACON_ENABLED) {
5357 /* FIXME: can we remove beacon_enabled ? */
5358 if (vif->bss_conf.enable_beacon)
5359 il->beacon_enabled = true;
5360 else
5361 il->beacon_enabled = false;
5362 }
5363
5364 if (changes & BSS_CHANGED_BSSID) {
5365 D_MAC80211("BSSID %pM\n", bss_conf->bssid);
5366
5367 /*
5368 * On a passive channel we wait with blocked queues to see if
5369 * there is traffic on that channel. If no frame is
5370 * received (which is very unlikely since the scan detected an AP on
5371 * that channel, but theoretically possible), the mac80211 association
5372 * procedure will time out and mac80211 will call us with a NULL
5373 * bssid. We have to unblock the queues in that case.
5374 */
5375 if (is_zero_ether_addr(bss_conf->bssid))
5376 il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
5377
5378 /*
5379 * If there is currently a HW scan going on in the background,
5380 * then we need to cancel it, otherwise sometimes we are not
5381 * able to authenticate (FIXME: why ?)
5382 */
5383 if (il_scan_cancel_timeout(il, 100)) {
5384 D_MAC80211("leave - scan abort failed\n");
5385 mutex_unlock(&il->mutex);
5386 return;
5387 }
5388
5389 /* mac80211 only sets assoc when in STATION mode */
5390 memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
5391
5392 /* FIXME: currently needed in a few places */
5393 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
5394 }
5395
5396 /*
5397 * This needs to be after setting the BSSID in case
5398 * mac80211 decides to do both changes at once because
5399 * it will invoke post_associate.
5400 */
5401 if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
5402 il_beacon_update(hw, vif);
5403
5404 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
5405 D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
5406 if (bss_conf->use_short_preamble)
5407 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
5408 else
5409 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
5410 }
5411
5412 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
5413 D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
5414 if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
5415 il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
5416 else
5417 il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
5418 if (bss_conf->use_cts_prot)
5419 il->staging.flags |= RXON_FLG_SELF_CTS_EN;
5420 else
5421 il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
5422 }
5423
5424 if (changes & BSS_CHANGED_BASIC_RATES) {
5425 /* XXX use this information
5426 *
5427 * To do that, remove code from il_set_rate() and put something
5428 * like this here:
5429 *
5430 if (A-band)
5431 il->staging.ofdm_basic_rates =
5432 bss_conf->basic_rates;
5433 else
5434 il->staging.ofdm_basic_rates =
5435 bss_conf->basic_rates >> 4;
5436 il->staging.cck_basic_rates =
5437 bss_conf->basic_rates & 0xF;
5438 */
5439 }
5440
5441 if (changes & BSS_CHANGED_HT) {
5442 il_ht_conf(il, vif);
5443
5444 if (il->ops->set_rxon_chain)
5445 il->ops->set_rxon_chain(il);
5446 }
5447
5448 if (changes & BSS_CHANGED_ASSOC) {
5449 D_MAC80211("ASSOC %d\n", bss_conf->assoc);
5450 if (bss_conf->assoc) {
5451 il->timestamp = bss_conf->sync_tsf;
5452
5453 if (!il_is_rfkill(il))
5454 il->ops->post_associate(il);
5455 } else
5456 il_set_no_assoc(il, vif);
5457 }
5458
5459 if (changes && il_is_associated(il) && bss_conf->aid) {
5460 D_MAC80211("Changes (%#x) while associated\n", changes);
5461 ret = il_send_rxon_assoc(il);
5462 if (!ret) {
5463 /* Sync active_rxon with latest change. */
5464 memcpy((void *)&il->active, &il->staging,
5465 sizeof(struct il_rxon_cmd));
5466 }
5467 }
5468
5469 if (changes & BSS_CHANGED_BEACON_ENABLED) {
5470 if (vif->bss_conf.enable_beacon) {
5471 memcpy(il->staging.bssid_addr, bss_conf->bssid,
5472 ETH_ALEN);
5473 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
5474 il->ops->config_ap(il);
5475 } else
5476 il_set_no_assoc(il, vif);
5477 }
5478
5479 if (changes & BSS_CHANGED_IBSS) {
5480 ret = il->ops->manage_ibss_station(il, vif,
5481 bss_conf->ibss_joined);
5482 if (ret)
5483 IL_ERR("failed to %s IBSS station %pM\n",
5484 bss_conf->ibss_joined ? "add" : "remove",
5485 bss_conf->bssid);
5486 }
5487
5488 D_MAC80211("leave\n");
5489 mutex_unlock(&il->mutex);
5490}
5491EXPORT_SYMBOL(il_mac_bss_info_changed);
5492
5493irqreturn_t
5494il_isr(int irq, void *data)
5495{
5496 struct il_priv *il = data;
5497 u32 inta, inta_mask;
5498 u32 inta_fh;
5499 unsigned long flags;
5500 if (!il)
5501 return IRQ_NONE;
5502
5503 spin_lock_irqsave(&il->lock, flags);
5504
5505 /* Disable (but don't clear!) interrupts here to avoid
5506 * back-to-back ISRs and sporadic interrupts from our NIC.
5507 * If we have something to service, the tasklet will re-enable ints.
5508 * If we *don't* have something, we'll re-enable before leaving here. */
5509 inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */
5510 _il_wr(il, CSR_INT_MASK, 0x00000000);
5511
5512 /* Discover which interrupts are active/pending */
5513 inta = _il_rd(il, CSR_INT);
5514 inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
5515
5516 /* Ignore interrupt if there's nothing in NIC to service.
5517 * This may be due to IRQ shared with another device,
5518 * or due to sporadic interrupts thrown from our NIC. */
5519 if (!inta && !inta_fh) {
5520 D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5521 goto none;
5522 }
5523
5524 if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
5525 /* Hardware disappeared. It might have already raised
5526 * an interrupt */
5527 IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
5528 goto unplugged;
5529 }
5530
5531 D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
5532 inta_fh);
5533
5534 inta &= ~CSR_INT_BIT_SCD;
5535
5536 /* il_irq_tasklet() will service interrupts and re-enable them */
5537 if (likely(inta || inta_fh))
5538 tasklet_schedule(&il->irq_tasklet);
5539
5540unplugged:
5541 spin_unlock_irqrestore(&il->lock, flags);
5542 return IRQ_HANDLED;
5543
5544none:
5545 /* re-enable interrupts here since we don't have anything to service. */
5546 /* only Re-enable if disabled by irq */
5547 if (test_bit(S_INT_ENABLED, &il->status))
5548 il_enable_interrupts(il);
5549 spin_unlock_irqrestore(&il->lock, flags);
5550 return IRQ_NONE;
5551}
5552EXPORT_SYMBOL(il_isr);
5553
5554/*
5555 * il_tx_cmd_protection: Set RTS/CTS protection flags. Only the 3945 and
5556 * 4965 share this function.
5557 */
5558void
5559il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5560 __le16 fc, __le32 *tx_flags)
5561{
5562 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5563 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5564 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5565 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5566
5567 if (!ieee80211_is_mgmt(fc))
5568 return;
5569
5570 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5571 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5572 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5573 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5574 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5575 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5576 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5577 break;
5578 }
5579 } else if (info->control.rates[0].flags &
5580 IEEE80211_TX_RC_USE_CTS_PROTECT) {
5581 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5582 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5583 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5584 }
5585}
5586EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
new file mode 100644
index 000000000000..ce52cf114fde
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -0,0 +1,3084 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#ifndef __il_core_h__
27#define __il_core_h__
28
29#include <linux/interrupt.h>
30#include <linux/pci.h> /* for struct pci_device_id */
31#include <linux/kernel.h>
32#include <linux/leds.h>
33#include <linux/wait.h>
34#include <linux/io.h>
35#include <net/mac80211.h>
36#include <net/ieee80211_radiotap.h>
37
38#include "commands.h"
39#include "csr.h"
40#include "prph.h"
41
42struct il_host_cmd;
43struct il_cmd;
44struct il_tx_queue;
45
46#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
47#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
48#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
49
50#define RX_QUEUE_SIZE 256
51#define RX_QUEUE_MASK 255
52#define RX_QUEUE_SIZE_LOG 8
53
54/*
55 * RX related structures and functions
56 */
57#define RX_FREE_BUFFERS 64
58#define RX_LOW_WATERMARK 8
59
60#define U32_PAD(n) ((4-(n))&0x3)
61
62/* CT-KILL constants */
63#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
64
65/* Default noise level to report when noise measurement is not available.
66 * This may be because we're:
67 * 1) Not associated (4965, no beacon stats being sent to driver)
68 * 2) Scanning (noise measurement does not apply to associated channel)
69 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
70 * Use default noise value of -127 ... this is below the range of measurable
71 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
72 * Also, -127 works better than 0 when averaging frames with/without
73 * noise info (e.g. averaging might be done in app); measured dBm values are
74 * always negative ... using a negative value as the default keeps all
75 * averages within an s8's (used in some apps) range of negative values. */
76#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
77
78/*
79 * RTS threshold here is total size [2347] minus 4 FCS bytes
80 * Per spec:
81 * a value of 0 means RTS on all data/management packets
82 * a value > max MSDU size means no RTS
83 * else RTS for data/management frames where MPDU is larger
84 * than RTS value.
85 */
86#define DEFAULT_RTS_THRESHOLD 2347U
87#define MIN_RTS_THRESHOLD 0U
88#define MAX_RTS_THRESHOLD 2347U
89#define MAX_MSDU_SIZE 2304U
90#define MAX_MPDU_SIZE 2346U
91#define DEFAULT_BEACON_INTERVAL 100U
92#define DEFAULT_SHORT_RETRY_LIMIT 7U
93#define DEFAULT_LONG_RETRY_LIMIT 4U
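/*
 * Illustrative sketch only, not part of the driver: it restates the RTS
 * threshold rules from the comment above; the helper name
 * il_example_needs_rts() is hypothetical.
 */
static inline bool il_example_needs_rts(u32 rts_threshold, u32 mpdu_len)
{
	if (rts_threshold == MIN_RTS_THRESHOLD)	/* 0: RTS on all data/mgmt frames */
		return true;
	if (rts_threshold > MAX_MSDU_SIZE)	/* above max MSDU size: no RTS */
		return false;
	return mpdu_len > rts_threshold;	/* RTS only for larger MPDUs */
}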
94
95struct il_rx_buf {
96 dma_addr_t page_dma;
97 struct page *page;
98 struct list_head list;
99};
100
101#define rxb_addr(r) page_address(r->page)
102
103/* defined below */
104struct il_device_cmd;
105
106struct il_cmd_meta {
107 /* only for SYNC commands, iff the reply skb is wanted */
108 struct il_host_cmd *source;
109 /*
110 * only for ASYNC commands
111 * (which is somewhat stupid -- look at common.c for instance
112 * which duplicates a bunch of code because the callback isn't
113 * invoked for SYNC commands, if it were and its result passed
114 * through it would be simpler...)
115 */
116 void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
117 struct il_rx_pkt *pkt);
118
119 /* The CMD_SIZE_HUGE flag bit indicates that the command
120 * structure is stored at the end of the shared queue memory. */
121 u32 flags;
122
123 DEFINE_DMA_UNMAP_ADDR(mapping);
124 DEFINE_DMA_UNMAP_LEN(len);
125};
126
127/*
128 * Generic queue structure
129 *
130 * Contains common data for Rx and Tx queues
131 */
132struct il_queue {
133 int n_bd; /* number of BDs in this queue */
134 int write_ptr; /* 1-st empty entry (idx) host_w */
135 int read_ptr; /* last used entry (idx) host_r */
136 /* use for monitoring and recovering the stuck queue */
137 dma_addr_t dma_addr; /* physical addr for BD's */
138 int n_win; /* safe queue win */
139 u32 id;
140 int low_mark; /* low watermark, resume queue if free
141 * space more than this */
142 int high_mark; /* high watermark, stop queue if free
143 * space less than this */
144};
145
146/**
147 * struct il_tx_queue - Tx Queue for DMA
148 * @q: generic Rx/Tx queue descriptor
149 * @bd: base of circular buffer of TFDs
150 * @cmd: array of command/TX buffer pointers
151 * @meta: array of meta data for each command/tx buffer
152 * @dma_addr_cmd: physical address of cmd/tx buffer array
153 * @skbs: array of per-TFD socket buffer pointers
154 * @time_stamp: time (in jiffies) of last read_ptr change
155 * @need_update: indicates need to update read/write idx
156 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
157 *
158 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
159 * descriptors) and required locking structures.
160 */
161#define TFD_TX_CMD_SLOTS 256
162#define TFD_CMD_SLOTS 32
163
164struct il_tx_queue {
165 struct il_queue q;
166 void *tfds;
167 struct il_device_cmd **cmd;
168 struct il_cmd_meta *meta;
169 struct sk_buff **skbs;
170 unsigned long time_stamp;
171 u8 need_update;
172 u8 sched_retry;
173 u8 active;
174 u8 swq_id;
175};
176
177/*
178 * EEPROM access time values:
179 *
180 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
181 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
182 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
183 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
184 */
185#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
186
187#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
188#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
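/*
 * Illustrative sketch of the polling protocol described above, not part of
 * the driver: il_example_eeprom_read16() is a hypothetical helper and
 * assumes the register accessors _il_rd()/_il_wr(), udelay() and the
 * CSR_EEPROM_* definitions from csr.h are in scope.
 */
static inline int il_example_eeprom_read16(struct il_priv *il, u32 byte_addr, u16 *val)
{
	u32 r;
	int waited;

	_il_wr(il, CSR_EEPROM_REG, byte_addr << 1);
	for (waited = 0; waited < IL_EEPROM_ACCESS_TIMEOUT; waited += 10) {
		r = _il_rd(il, CSR_EEPROM_REG);
		if (r & CSR_EEPROM_REG_READ_VALID_MSK) {
			*val = r >> 16;	/* data is in bits 31-16 */
			return 0;
		}
		udelay(10);	/* wait 10 uSec between polls */
	}
	return -ETIMEDOUT;
}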
189
190/*
191 * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
192 *
193 * IBSS and/or AP operation is allowed *only* on those channels with
194 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
195 * RADAR detection is not supported by the 4965 driver, but is a
196 * requirement for establishing a new network for legal operation on channels
197 * requiring RADAR detection or restricting ACTIVE scanning.
198 *
199 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
200 * It only indicates that 20 MHz channel use is supported; HT40 channel
201 * usage is indicated by a separate set of regulatory flags for each
202 * HT40 channel pair.
203 *
204 * NOTE: Using a channel inappropriately will result in a uCode error!
205 */
206#define IL_NUM_TX_CALIB_GROUPS 5
207enum {
208 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
209 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
210 /* Bit 2 Reserved */
211 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
212 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
213 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
214 /* Bit 6 Reserved (was Narrow Channel) */
215 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
216};
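/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * the "(VALID && IBSS && ACTIVE && !RADAR)" rule from the comment above,
 * written out against the EEPROM_CHANNEL_* flags.
 */
static inline bool il_example_channel_allows_ibss(u8 eeprom_flags)
{
	return (eeprom_flags & EEPROM_CHANNEL_VALID) &&
	       (eeprom_flags & EEPROM_CHANNEL_IBSS) &&
	       (eeprom_flags & EEPROM_CHANNEL_ACTIVE) &&
	       !(eeprom_flags & EEPROM_CHANNEL_RADAR);
}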
217
218/* SKU Capabilities */
219/* 3945 only */
220#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
221#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
222
223/* *regulatory* channel data format in eeprom, one for each channel.
224 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
225struct il_eeprom_channel {
226 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
227 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
228} __packed;
229
230/* 3945 Specific */
231#define EEPROM_3945_EEPROM_VERSION (0x2f)
232
233/* 4965 has two radio transmitters (and 3 radio receivers) */
234#define EEPROM_TX_POWER_TX_CHAINS (2)
235
236/* 4965 has room for up to 8 sets of txpower calibration data */
237#define EEPROM_TX_POWER_BANDS (8)
238
239/* 4965 factory calibration measures txpower gain settings for
240 * each of 3 target output levels */
241#define EEPROM_TX_POWER_MEASUREMENTS (3)
242
243/* 4965 Specific */
244/* 4965 driver does not work with txpower calibration version < 5 */
245#define EEPROM_4965_TX_POWER_VERSION (5)
246#define EEPROM_4965_EEPROM_VERSION (0x2f)
247#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
248#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
249#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
250#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
251
252/* 2.4 GHz */
253extern const u8 il_eeprom_band_1[14];
254
255/*
256 * factory calibration data for one txpower level, on one channel,
257 * measured on one of the 2 tx chains (radio transmitter and associated
258 * antenna). EEPROM contains:
259 *
260 * 1) Temperature (degrees Celsius) of device when measurement was made.
261 *
262 * 2) Gain table idx used to achieve the target measurement power.
263 * This refers to the "well-known" gain tables (see 4965.h).
264 *
265 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
266 *
267 * 4) RF power amplifier detector level measurement (not used).
268 */
269struct il_eeprom_calib_measure {
270 u8 temperature; /* Device temperature (Celsius) */
271 u8 gain_idx; /* Index into gain table */
272 u8 actual_pow; /* Measured RF output power, half-dBm */
273 s8 pa_det; /* Power amp detector level (not used) */
274} __packed;
275
276/*
277 * measurement set for one channel. EEPROM contains:
278 *
279 * 1) Channel number measured
280 *
281 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
282 * (a.k.a. "tx chains") (6 measurements altogether)
283 */
284struct il_eeprom_calib_ch_info {
285 u8 ch_num;
286 struct il_eeprom_calib_measure
287 measurements[EEPROM_TX_POWER_TX_CHAINS]
288 [EEPROM_TX_POWER_MEASUREMENTS];
289} __packed;
290
291/*
292 * txpower subband info.
293 *
294 * For each frequency subband, EEPROM contains the following:
295 *
296 * 1) First and last channels within range of the subband. "0" values
297 * indicate that this sample set is not being used.
298 *
299 * 2) Sample measurement sets for 2 channels close to the range endpoints.
300 */
301struct il_eeprom_calib_subband_info {
302 u8 ch_from; /* channel number of lowest channel in subband */
303 u8 ch_to; /* channel number of highest channel in subband */
304 struct il_eeprom_calib_ch_info ch1;
305 struct il_eeprom_calib_ch_info ch2;
306} __packed;
307
308/*
309 * txpower calibration info. EEPROM contains:
310 *
311 * 1) Factory-measured saturation power levels (maximum levels at which
312 * tx power amplifier can output a signal without too much distortion).
313 * There is one level for 2.4 GHz band and one for 5 GHz band. These
314 * values apply to all channels within each of the bands.
315 *
316 * 2) Factory-measured power supply voltage level. This is assumed to be
317 * constant (i.e. same value applies to all channels/bands) while the
318 * factory measurements are being made.
319 *
320 * 3) Up to 8 sets of factory-measured txpower calibration values.
321 * These are for different frequency ranges, since txpower gain
322 * characteristics of the analog radio circuitry vary with frequency.
323 *
324 * Not all sets need to be filled with data;
325 * struct il_eeprom_calib_subband_info contains range of channels
326 * (0 if unused) for each set of data.
327 */
328struct il_eeprom_calib_info {
329 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
330 u8 saturation_power52; /* half-dBm */
331 __le16 voltage; /* signed */
332 struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
333} __packed;
334
335/* General */
336#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
337#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
338#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
339#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
340#define EEPROM_VERSION (2*0x44) /* 2 bytes */
341#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
342#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
343#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
344#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
345#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
346
347/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
348#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
349#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
350#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
351#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
352#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
353#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
354
355#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
356#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
357
358/*
359 * Per-channel regulatory data.
360 *
361 * Each channel that *might* be supported by iwl has a fixed location
362 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
363 * txpower (MSB).
364 *
365 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
366 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
367 *
368 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
369 */
370#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
371#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
372#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
373
374/*
375 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
376 * 5.0 GHz channels 7, 8, 11, 12, 16
377 * (4915-5080MHz) (none of these is ever supported)
378 */
379#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
380#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
381
382/*
383 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
384 * (5170-5320MHz)
385 */
386#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
387#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
388
389/*
390 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
391 * (5500-5700MHz)
392 */
393#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
394#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
395
396/*
397 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
398 * (5725-5825MHz)
399 */
400#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
401#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
402
403/*
404 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
405 *
406 * The channel listed is the center of the lower 20 MHz half of the channel.
407 * The overall center frequency is actually 2 channels (10 MHz) above that,
408 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
409 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
410 * and the overall HT40 channel width centers on channel 3.
411 *
412 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
413 * control channel to which to tune. RXON also specifies whether the
414 * control channel is the upper or lower half of a HT40 channel.
415 *
416 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
417 */
418#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
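/*
 * Illustrative sketch only (hypothetical helpers, not part of the driver):
 * the 2.4 GHz HT40 numbering described above. The EEPROM lists the lower
 * 20 MHz control channel; its upper half is 4 channels above it and the
 * overall 40 MHz center sits 2 channels above it.
 */
static inline u8 il_example_ht40_upper_half(u8 lower_ctrl_ch)
{
	return lower_ctrl_ch + 4;	/* e.g. HT40 channel 1 pairs with channel 5 */
}

static inline u8 il_example_ht40_center(u8 lower_ctrl_ch)
{
	return lower_ctrl_ch + 2;	/* e.g. HT40 channel 1 centers on channel 3 */
}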
419
420/*
421 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
422 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
423 */
424#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
425
426#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
427
428int il_eeprom_init(struct il_priv *il);
429void il_eeprom_free(struct il_priv *il);
430const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
431u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
432int il_init_channel_map(struct il_priv *il);
433void il_free_channel_map(struct il_priv *il);
434const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
435 enum ieee80211_band band,
436 u16 channel);
437
438#define IL_NUM_SCAN_RATES (2)
439
440struct il4965_channel_tgd_info {
441 u8 type;
442 s8 max_power;
443};
444
445struct il4965_channel_tgh_info {
446 s64 last_radar_time;
447};
448
449#define IL4965_MAX_RATE (33)
450
451struct il3945_clip_group {
452 /* maximum power level to prevent clipping for each rate, derived by
453 * us from this band's saturation power in EEPROM */
454 const s8 clip_powers[IL_MAX_RATES];
455};
456
457/* current Tx power values to use, one for each rate for each channel.
458 * requested power is limited by:
459 * -- regulatory EEPROM limits for this channel
460 * -- hardware capabilities (clip-powers)
461 * -- spectrum management
462 * -- user preference (e.g. iwconfig)
463 * when requested power is set, base power idx must also be set. */
464struct il3945_channel_power_info {
465 struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
466 s8 power_table_idx; /* actual (compensated) idx into gain table */
467 s8 base_power_idx; /* gain idx for power at factory temp. */
468 s8 requested_power; /* power (dBm) requested for this chnl/rate */
469};
470
471/* current scan Tx power values to use, one for each scan rate for each
472 * channel. */
473struct il3945_scan_power_info {
474 struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
475 s8 power_table_idx; /* actual (compensated) idx into gain table */
476 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
477};
478
479/*
480 * One for each channel, holds all channel setup data
481 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
482 * with one another!
483 */
484struct il_channel_info {
485 struct il4965_channel_tgd_info tgd;
486 struct il4965_channel_tgh_info tgh;
487 struct il_eeprom_channel eeprom; /* EEPROM regulatory limit */
488 struct il_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
489 * HT40 channel */
490
491 u8 channel; /* channel number */
492 u8 flags; /* flags copied from EEPROM */
493 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
494 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
495 s8 min_power; /* always 0 */
496 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
497
498 u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
499 u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
500 enum ieee80211_band band;
501
502 /* HT40 channel info */
503 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
504 u8 ht40_flags; /* flags copied from EEPROM */
505 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
506
507 /* Radio/DSP gain settings for each "normal" data Tx rate.
508 * These include, in addition to RF and DSP gain, a few fields for
509 * remembering/modifying gain settings (idxes). */
510 struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
511
512 /* Radio/DSP gain settings for each scan rate, for directed scans. */
513 struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
514};
515
516#define IL_TX_FIFO_BK 0 /* shared */
517#define IL_TX_FIFO_BE 1
518#define IL_TX_FIFO_VI 2 /* shared */
519#define IL_TX_FIFO_VO 3
520#define IL_TX_FIFO_UNUSED -1
521
522/* Minimum number of queues. MAX_NUM is defined in hw specific files.
523 * Set the minimum to accommodate the 4 standard TX queues, 1 command
524 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
525#define IL_MIN_NUM_QUEUES 10
526
527#define IL_DEFAULT_CMD_QUEUE_NUM 4
528
529#define IEEE80211_DATA_LEN 2304
530#define IEEE80211_4ADDR_LEN 30
531#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
532#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
533
534struct il_frame {
535 union {
536 struct ieee80211_hdr frame;
537 struct il_tx_beacon_cmd beacon;
538 u8 raw[IEEE80211_FRAME_LEN];
539 u8 cmd[360];
540 } u;
541 struct list_head list;
542};
543
544enum {
545 CMD_SYNC = 0,
546 CMD_SIZE_NORMAL = 0,
547 CMD_NO_SKB = 0,
548 CMD_SIZE_HUGE = (1 << 0),
549 CMD_ASYNC = (1 << 1),
550 CMD_WANT_SKB = (1 << 2),
551 CMD_MAPPED = (1 << 3),
552};
553
554#define DEF_CMD_PAYLOAD_SIZE 320
555
556/**
557 * struct il_device_cmd
558 *
559 * For allocation of the command and tx queues, this establishes the overall
560 * size of the largest command we send to uCode, except for a scan command
561 * (which is relatively huge; space is allocated separately).
562 */
563struct il_device_cmd {
564 struct il_cmd_header hdr; /* uCode API */
565 union {
566 u32 flags;
567 u8 val8;
568 u16 val16;
569 u32 val32;
570 struct il_tx_cmd tx;
571 u8 payload[DEF_CMD_PAYLOAD_SIZE];
572 } __packed cmd;
573} __packed;
574
575#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
576
577struct il_host_cmd {
578 const void *data;
579 unsigned long reply_page;
580 void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
581 struct il_rx_pkt *pkt);
582 u32 flags;
583 u16 len;
584 u8 id;
585};
586
587#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
588#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
589#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
590
591/**
592 * struct il_rx_queue - Rx queue
593 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
594 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
595 * @read: Shared idx to newest available Rx buffer
596 * @write: Shared idx to oldest written Rx packet
597 * @free_count: Number of pre-allocated buffers in rx_free
598 * @rx_free: list of free SKBs for use
599 * @rx_used: List of Rx buffers with no SKB
600 * @need_update: flag to indicate we need to update read/write idx
601 * @rb_stts: driver's pointer to receive buffer status
602 * @rb_stts_dma: bus address of receive buffer status
603 *
604 * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs
605 */
606struct il_rx_queue {
607 __le32 *bd;
608 dma_addr_t bd_dma;
609 struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
610 struct il_rx_buf *queue[RX_QUEUE_SIZE];
611 u32 read;
612 u32 write;
613 u32 free_count;
614 u32 write_actual;
615 struct list_head rx_free;
616 struct list_head rx_used;
617 int need_update;
618 struct il_rb_status *rb_stts;
619 dma_addr_t rb_stts_dma;
620 spinlock_t lock;
621};
622
623#define IL_SUPPORTED_RATES_IE_LEN 8
624
625#define MAX_TID_COUNT 9
626
627#define IL_INVALID_RATE 0xFF
628#define IL_INVALID_VALUE -1
629
630/**
631 * struct il_ht_agg -- aggregation status while waiting for block-ack
632 * @txq_id: Tx queue used for Tx attempt
633 * @frame_count: # frames attempted by Tx command
634 * @wait_for_ba: Expect block-ack before next Tx reply
635 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
636 * @bitmap: Bitmap with one bit for each frame pending ACK in the Tx win
637 * (the low and high order halves are kept in a single u64)
638 * @rate_n_flags: Rate at which Tx was attempted
639 *
640 * If C_TX indicates that aggregation was attempted, driver must wait
641 * for block ack (N_COMPRESSED_BA). This struct stores tx reply info
642 * until block ack arrives.
643 */
644struct il_ht_agg {
645 u16 txq_id;
646 u16 frame_count;
647 u16 wait_for_ba;
648 u16 start_idx;
649 u64 bitmap;
650 u32 rate_n_flags;
651#define IL_AGG_OFF 0
652#define IL_AGG_ON 1
653#define IL_EMPTYING_HW_QUEUE_ADDBA 2
654#define IL_EMPTYING_HW_QUEUE_DELBA 3
655 u8 state;
656};
657
658struct il_tid_data {
659 u16 seq_number; /* 4965 only */
660 u16 tfds_in_queue;
661 struct il_ht_agg agg;
662};
663
664struct il_hw_key {
665 u32 cipher;
666 int keylen;
667 u8 keyidx;
668 u8 key[32];
669};
670
671union il_ht_rate_supp {
672 u16 rates;
673 struct {
674 u8 siso_rate;
675 u8 mimo_rate;
676 };
677};
678
679#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
680#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
681#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
682#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
683#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
684#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
685#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
686
687/*
688 * Maximal MPDU density for TX aggregation
689 * 4 - 2us density
690 * 5 - 4us density
691 * 6 - 8us density
692 * 7 - 16us density
693 */
694#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
695#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
696#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
697#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
698#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
699#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
700#define CFG_HT_MPDU_DENSITY_MIN (0x1)
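/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * mapping the MPDU density codes listed above to microseconds, i.e.
 * 4 -> 2us, 5 -> 4us, 6 -> 8us, 7 -> 16us.
 */
static inline u32 il_example_mpdu_density_usec(u32 density_code)
{
	return 1U << (density_code - 3);	/* valid for codes 4 through 7 */
}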
701
702struct il_ht_config {
703 bool single_chain_sufficient;
704 enum ieee80211_smps_mode smps; /* current smps mode */
705};
706
707/* QoS structures */
708struct il_qos_info {
709 int qos_active;
710 struct il_qosparam_cmd def_qos_parm;
711};
712
713/*
714 * Structure should be accessed with sta_lock held. When station addition
715 * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
716 * the commands (il_addsta_cmd and il_link_quality_cmd) without
717 * sta_lock held.
718 */
719struct il_station_entry {
720 struct il_addsta_cmd sta;
721 struct il_tid_data tid[MAX_TID_COUNT];
722 u8 used;
723 struct il_hw_key keyinfo;
724 struct il_link_quality_cmd *lq;
725};
726
727struct il_station_priv_common {
728 u8 sta_id;
729};
730
731/**
732 * struct il_vif_priv - driver's private per-interface information
733 *
734 * When mac80211 allocates a virtual interface, it can allocate
735 * space for us to put data into.
736 */
737struct il_vif_priv {
738 u8 ibss_bssid_sta_id;
739};
740
741/* one for each uCode image (inst/data, boot/init/runtime) */
742struct fw_desc {
743 void *v_addr; /* access by driver */
744 dma_addr_t p_addr; /* access by card's busmaster DMA */
745 u32 len; /* bytes */
746};
747
748/* uCode file layout */
749struct il_ucode_header {
750 __le32 ver; /* major/minor/API/serial */
751 struct {
752 __le32 inst_size; /* bytes of runtime code */
753 __le32 data_size; /* bytes of runtime data */
754 __le32 init_size; /* bytes of init code */
755 __le32 init_data_size; /* bytes of init data */
756 __le32 boot_size; /* bytes of bootstrap code */
757 u8 data[0]; /* in same order as sizes */
758 } v1;
759};
760
761struct il4965_ibss_seq {
762 u8 mac[ETH_ALEN];
763 u16 seq_num;
764 u16 frag_num;
765 unsigned long packet_time;
766 struct list_head list;
767};
768
769struct il_sensitivity_ranges {
770 u16 min_nrg_cck;
771 u16 max_nrg_cck;
772
773 u16 nrg_th_cck;
774 u16 nrg_th_ofdm;
775
776 u16 auto_corr_min_ofdm;
777 u16 auto_corr_min_ofdm_mrc;
778 u16 auto_corr_min_ofdm_x1;
779 u16 auto_corr_min_ofdm_mrc_x1;
780
781 u16 auto_corr_max_ofdm;
782 u16 auto_corr_max_ofdm_mrc;
783 u16 auto_corr_max_ofdm_x1;
784 u16 auto_corr_max_ofdm_mrc_x1;
785
786 u16 auto_corr_max_cck;
787 u16 auto_corr_max_cck_mrc;
788 u16 auto_corr_min_cck;
789 u16 auto_corr_min_cck_mrc;
790
791 u16 barker_corr_th_min;
792 u16 barker_corr_th_min_mrc;
793 u16 nrg_th_cca;
794};
795
796#define KELVIN_TO_CELSIUS(x) ((x)-273)
797#define CELSIUS_TO_KELVIN(x) ((x)+273)
798
799/**
800 * struct il_hw_params
801 * @bcast_id: f/w broadcast station ID
802 * @max_txq_num: Max # Tx queues supported
803 * @dma_chnl_num: Number of Tx DMA/FIFO channels
804 * @scd_bc_tbls_size: size of scheduler byte count tables
805 * @tfd_size: TFD size
806 * @tx/rx_chains_num: Number of TX/RX chains
807 * @valid_tx/rx_ant: usable antennas
808 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
809 * @max_rxq_log: Log-base-2 of max_rxq_size
810 * @rx_page_order: Rx buffer page order
811 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
812 * @max_stations:
813 * @ht40_channel: is 40 MHz channel width possible, per band:
814 * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
815 * @sw_crypto: 0 for hw, 1 for sw
816 * @max_xxx_size: for ucode uses
817 * @ct_kill_threshold: temperature threshold
818 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
819 * @sens: range of sensitivity values (struct il_sensitivity_ranges)
820 */
821struct il_hw_params {
822 u8 bcast_id;
823 u8 max_txq_num;
824 u8 dma_chnl_num;
825 u16 scd_bc_tbls_size;
826 u32 tfd_size;
827 u8 tx_chains_num;
828 u8 rx_chains_num;
829 u8 valid_tx_ant;
830 u8 valid_rx_ant;
831 u16 max_rxq_size;
832 u16 max_rxq_log;
833 u32 rx_page_order;
834 u32 rx_wrt_ptr_reg;
835 u8 max_stations;
836 u8 ht40_channel;
837 u8 max_beacon_itrvl; /* in 1024 ms */
838 u32 max_inst_size;
839 u32 max_data_size;
840 u32 max_bsm_size;
841 u32 ct_kill_threshold; /* value in hw-dependent units */
842 u16 beacon_time_tsf_bits;
843 const struct il_sensitivity_ranges *sens;
844};
845
846/******************************************************************************
847 *
848 * Functions implemented in core module which are forward declared here
849 * for use by iwl-[4-5].c
850 *
851 * NOTE: The implementations of these functions are not hardware specific,
852 * which is why they are in the core module files.
853 *
854 * Naming convention --
855 * il_ <-- Is part of the iwlegacy core
856 * ilXXXX_ <-- Hardware specific (implemented in XXXX.c, e.g. 4965.c)
857 * il4965_bg_ <-- Called from work queue context
858 * il4965_mac_ <-- mac80211 callback
859 *
860 ****************************************************************************/
861void il4965_update_chain_flags(struct il_priv *il);
862extern const u8 il_bcast_addr[ETH_ALEN];
863int il_queue_space(const struct il_queue *q);
864static inline int
865il_queue_used(const struct il_queue *q, int i)
866{
867 return q->write_ptr >= q->read_ptr ?
868 (i >= q->read_ptr && i < q->write_ptr) :
869 !(i < q->read_ptr && i >= q->write_ptr);
873}
874
875static inline u8
876il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
877{
878 /*
879 * This is for init calibration result and scan command which
880 * required buffer > TFD_MAX_PAYLOAD_SIZE,
881 * the big buffer at end of command array
882 */
883 if (is_huge)
884 return q->n_win; /* must be power of 2 */
885
886 /* Otherwise, use normal size buffers */
887 return idx & (q->n_win - 1);
888}
889
890struct il_dma_ptr {
891 dma_addr_t dma;
892 void *addr;
893 size_t size;
894};
895
896#define IL_OPERATION_MODE_AUTO 0
897#define IL_OPERATION_MODE_HT_ONLY 1
898#define IL_OPERATION_MODE_MIXED 2
899#define IL_OPERATION_MODE_20MHZ 3
900
901#define IL_TX_CRC_SIZE 4
902#define IL_TX_DELIMITER_SIZE 4
903
904#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000
905
906/* Sensitivity and chain noise calibration */
907#define INITIALIZATION_VALUE 0xFFFF
908#define IL4965_CAL_NUM_BEACONS 20
909#define IL_CAL_NUM_BEACONS 16
910#define MAXIMUM_ALLOWED_PATHLOSS 15
911
912#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
913
914#define MAX_FA_OFDM 50
915#define MIN_FA_OFDM 5
916#define MAX_FA_CCK 50
917#define MIN_FA_CCK 5
918
919#define AUTO_CORR_STEP_OFDM 1
920
921#define AUTO_CORR_STEP_CCK 3
922#define AUTO_CORR_MAX_TH_CCK 160
923
924#define NRG_DIFF 2
925#define NRG_STEP_CCK 2
926#define NRG_MARGIN 8
927#define MAX_NUMBER_CCK_NO_FA 100
928
929#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
930
931#define CHAIN_A 0
932#define CHAIN_B 1
933#define CHAIN_C 2
934#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
935#define ALL_BAND_FILTER 0xFF00
936#define IN_BAND_FILTER 0xFF
937#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
938
939#define NRG_NUM_PREV_STAT_L 20
940#define NUM_RX_CHAINS 3
941
942enum il4965_false_alarm_state {
943 IL_FA_TOO_MANY = 0,
944 IL_FA_TOO_FEW = 1,
945 IL_FA_GOOD_RANGE = 2,
946};
947
948enum il4965_chain_noise_state {
949 IL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
950 IL_CHAIN_NOISE_ACCUMULATE,
951 IL_CHAIN_NOISE_CALIBRATED,
952 IL_CHAIN_NOISE_DONE,
953};
954
955enum ucode_type {
956 UCODE_NONE = 0,
957 UCODE_INIT,
958 UCODE_RT
959};
960
961/* Sensitivity calib data */
962struct il_sensitivity_data {
963 u32 auto_corr_ofdm;
964 u32 auto_corr_ofdm_mrc;
965 u32 auto_corr_ofdm_x1;
966 u32 auto_corr_ofdm_mrc_x1;
967 u32 auto_corr_cck;
968 u32 auto_corr_cck_mrc;
969
970 u32 last_bad_plcp_cnt_ofdm;
971 u32 last_fa_cnt_ofdm;
972 u32 last_bad_plcp_cnt_cck;
973 u32 last_fa_cnt_cck;
974
975 u32 nrg_curr_state;
976 u32 nrg_prev_state;
977 u32 nrg_value[10];
978 u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
979 u32 nrg_silence_ref;
980 u32 nrg_energy_idx;
981 u32 nrg_silence_idx;
982 u32 nrg_th_cck;
983 s32 nrg_auto_corr_silence_diff;
984 u32 num_in_cck_no_fa;
985 u32 nrg_th_ofdm;
986
987 u16 barker_corr_th_min;
988 u16 barker_corr_th_min_mrc;
989 u16 nrg_th_cca;
990};
991
992/* Chain noise (differential Rx gain) calib data */
993struct il_chain_noise_data {
994 u32 active_chains;
995 u32 chain_noise_a;
996 u32 chain_noise_b;
997 u32 chain_noise_c;
998 u32 chain_signal_a;
999 u32 chain_signal_b;
1000 u32 chain_signal_c;
1001 u16 beacon_count;
1002 u8 disconn_array[NUM_RX_CHAINS];
1003 u8 delta_gain_code[NUM_RX_CHAINS];
1004 u8 radio_write;
1005 u8 state;
1006};
1007
1008#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
1009#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
1010
1011#define IL_TRAFFIC_ENTRIES (256)
1012#define IL_TRAFFIC_ENTRY_SIZE (64)
1013
1014enum {
1015 MEASUREMENT_READY = (1 << 0),
1016 MEASUREMENT_ACTIVE = (1 << 1),
1017};
1018
1019/* interrupt stats */
1020struct isr_stats {
1021 u32 hw;
1022 u32 sw;
1023 u32 err_code;
1024 u32 sch;
1025 u32 alive;
1026 u32 rfkill;
1027 u32 ctkill;
1028 u32 wakeup;
1029 u32 rx;
1030 u32 handlers[IL_CN_MAX];
1031 u32 tx;
1032 u32 unhandled;
1033};
1034
1035/* management stats */
1036enum il_mgmt_stats {
1037 MANAGEMENT_ASSOC_REQ = 0,
1038 MANAGEMENT_ASSOC_RESP,
1039 MANAGEMENT_REASSOC_REQ,
1040 MANAGEMENT_REASSOC_RESP,
1041 MANAGEMENT_PROBE_REQ,
1042 MANAGEMENT_PROBE_RESP,
1043 MANAGEMENT_BEACON,
1044 MANAGEMENT_ATIM,
1045 MANAGEMENT_DISASSOC,
1046 MANAGEMENT_AUTH,
1047 MANAGEMENT_DEAUTH,
1048 MANAGEMENT_ACTION,
1049 MANAGEMENT_MAX,
1050};
1051/* control stats */
1052enum il_ctrl_stats {
1053 CONTROL_BACK_REQ = 0,
1054 CONTROL_BACK,
1055 CONTROL_PSPOLL,
1056 CONTROL_RTS,
1057 CONTROL_CTS,
1058 CONTROL_ACK,
1059 CONTROL_CFEND,
1060 CONTROL_CFENDACK,
1061 CONTROL_MAX,
1062};
1063
1064struct traffic_stats {
1065#ifdef CONFIG_IWLEGACY_DEBUGFS
1066 u32 mgmt[MANAGEMENT_MAX];
1067 u32 ctrl[CONTROL_MAX];
1068 u32 data_cnt;
1069 u64 data_bytes;
1070#endif
1071};
1072
1073/*
1074 * host interrupt timeout value
1075 * used with setting interrupt coalescing timer
1076 * the CSR_INT_COALESCING is an 8-bit register in 32-usec units
1077 *
1078 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
1079 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
1080 */
1081#define IL_HOST_INT_TIMEOUT_MAX (0xFF)
1082#define IL_HOST_INT_TIMEOUT_DEF (0x40)
1083#define IL_HOST_INT_TIMEOUT_MIN (0x0)
1084#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
1085#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
1086#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
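/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * converting a coalescing interval in microseconds to the 8-bit,
 * 32-usec-unit register value described above (2048 usecs -> 0x40, the
 * default).
 */
static inline u8 il_example_coalescing_reg_val(u32 usecs)
{
	u32 units = usecs / 32;	/* CSR_INT_COALESCING counts 32-usec units */

	return units > IL_HOST_INT_TIMEOUT_MAX ? IL_HOST_INT_TIMEOUT_MAX : units;
}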
1087
1088#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1089
1090/* TX queue watchdog timeouts in mSecs */
1091#define IL_DEF_WD_TIMEOUT (2000)
1092#define IL_LONG_WD_TIMEOUT (10000)
1093#define IL_MAX_WD_TIMEOUT (120000)
1094
1095struct il_force_reset {
1096 int reset_request_count;
1097 int reset_success_count;
1098 int reset_reject_count;
1099 unsigned long reset_duration;
1100 unsigned long last_force_reset_jiffies;
1101};
1102
1103/* extend beacon time format bit shifting */
1104/*
1105 * for _3945 devices
1106 * bits 31:24 - extended
1107 * bits 23:0 - interval
1108 */
1109#define IL3945_EXT_BEACON_TIME_POS 24
1110/*
1111 * for _4965 devices
1112 * bits 31:22 - extended
1113 * bits 21:0 - interval
1114 */
1115#define IL4965_EXT_BEACON_TIME_POS 22
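/*
 * Illustrative sketch only (hypothetical helpers, not part of the driver):
 * splitting an extended beacon time value into its "extended" and
 * "interval" parts at the bit positions defined above (pass
 * IL3945_EXT_BEACON_TIME_POS or IL4965_EXT_BEACON_TIME_POS as ext_pos).
 */
static inline u32 il_example_beacon_time_extended(u32 val, int ext_pos)
{
	return val >> ext_pos;			/* e.g. bits 31:22 on 4965 */
}

static inline u32 il_example_beacon_time_interval(u32 val, int ext_pos)
{
	return val & ((1U << ext_pos) - 1);	/* e.g. bits 21:0 on 4965 */
}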
1116
1117struct il_rxon_context {
1118 struct ieee80211_vif *vif;
1119};
1120
1121struct il_power_mgr {
1122 struct il_powertable_cmd sleep_cmd;
1123 struct il_powertable_cmd sleep_cmd_next;
1124 int debug_sleep_level_override;
1125 bool pci_pm;
1126 bool ps_disabled;
1127};
1128
1129struct il_priv {
1130 struct ieee80211_hw *hw;
1131 struct ieee80211_channel *ieee_channels;
1132 struct ieee80211_rate *ieee_rates;
1133
1134 struct il_cfg *cfg;
1135 const struct il_ops *ops;
1136#ifdef CONFIG_IWLEGACY_DEBUGFS
1137 const struct il_debugfs_ops *debugfs_ops;
1138#endif
1139
1140 /* temporary frame storage list */
1141 struct list_head free_frames;
1142 int frames_count;
1143
1144 enum ieee80211_band band;
1145 int alloc_rxb_page;
1146
1147 void (*handlers[IL_CN_MAX]) (struct il_priv *il,
1148 struct il_rx_buf *rxb);
1149
1150 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
1151
1152 /* spectrum measurement report caching */
1153 struct il_spectrum_notification measure_report;
1154 u8 measurement_status;
1155
1156 /* ucode beacon time */
1157 u32 ucode_beacon_time;
1158 int missed_beacon_threshold;
1159
1160 /* track IBSS manager (last beacon) status */
1161 u32 ibss_manager;
1162
1163 /* force reset */
1164 struct il_force_reset force_reset;
1165
1166 /* we allocate array of il_channel_info for NIC's valid channels.
1167 * Access via channel # using indirect idx array */
1168 struct il_channel_info *channel_info; /* channel info array */
1169 u8 channel_count; /* # of channels */
1170
1171 /* thermal calibration */
1172 s32 temperature; /* degrees Kelvin */
1173 s32 last_temperature;
1174
1175 /* Scan related variables */
1176 unsigned long scan_start;
1177 unsigned long scan_start_tsf;
1178 void *scan_cmd;
1179 enum ieee80211_band scan_band;
1180 struct cfg80211_scan_request *scan_request;
1181 struct ieee80211_vif *scan_vif;
1182 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1183 u8 mgmt_tx_ant;
1184
1185 /* spinlock */
1186 spinlock_t lock; /* protect general shared data */
1187 spinlock_t hcmd_lock; /* protect hcmd */
1188 spinlock_t reg_lock; /* protect hw register access */
1189 struct mutex mutex;
1190
1191 /* basic pci-network driver stuff */
1192 struct pci_dev *pci_dev;
1193
1194 /* pci hardware address support */
1195 void __iomem *hw_base;
1196 u32 hw_rev;
1197 u32 hw_wa_rev;
1198 u8 rev_id;
1199
1200 /* command queue number */
1201 u8 cmd_queue;
1202
1203 /* max number of station keys */
1204 u8 sta_key_max_num;
1205
1206 /* EEPROM MAC addresses */
1207 struct mac_address addresses[1];
1208
1209 /* uCode images, save to reload in case of failure */
1210 int fw_idx; /* firmware we're trying to load */
1211 u32 ucode_ver; /* version of ucode, copy of
1212 il_ucode.ver */
1213 struct fw_desc ucode_code; /* runtime inst */
1214 struct fw_desc ucode_data; /* runtime data original */
1215 struct fw_desc ucode_data_backup; /* runtime data save/restore */
1216 struct fw_desc ucode_init; /* initialization inst */
1217 struct fw_desc ucode_init_data; /* initialization data */
1218 struct fw_desc ucode_boot; /* bootstrap inst */
1219 enum ucode_type ucode_type;
1220 u8 ucode_write_complete; /* the image write is complete */
1221 char firmware_name[25];
1222
1223 struct ieee80211_vif *vif;
1224
1225 struct il_qos_info qos_data;
1226
1227 struct {
1228 bool enabled;
1229 bool is_40mhz;
1230 bool non_gf_sta_present;
1231 u8 protection;
1232 u8 extension_chan_offset;
1233 } ht;
1234
1235 /*
1236 * We declare this const so it can only be
1237 * changed via explicit cast within the
1238 * routines that actually update the physical
1239 * hardware.
1240 */
1241 const struct il_rxon_cmd active;
1242 struct il_rxon_cmd staging;
1243
1244 struct il_rxon_time_cmd timing;
1245
1246 __le16 switch_channel;
1247
1248 /* 1st responses from initialize and runtime uCode images.
1249 * _4965's initialize alive response contains some calibration data. */
1250 struct il_init_alive_resp card_alive_init;
1251 struct il_alive_resp card_alive;
1252
1253 u16 active_rate;
1254
1255 u8 start_calib;
1256 struct il_sensitivity_data sensitivity_data;
1257 struct il_chain_noise_data chain_noise_data;
1258 __le16 sensitivity_tbl[HD_TBL_SIZE];
1259
1260 struct il_ht_config current_ht_config;
1261
1262 /* Rate scaling data */
1263 u8 retry_rate;
1264
1265 wait_queue_head_t wait_command_queue;
1266
1267 int activity_timer_active;
1268
1269 /* Rx and Tx DMA processing queues */
1270 struct il_rx_queue rxq;
1271 struct il_tx_queue *txq;
1272 unsigned long txq_ctx_active_msk;
1273 struct il_dma_ptr kw; /* keep warm address */
1274 struct il_dma_ptr scd_bc_tbls;
1275
1276 u32 scd_base_addr; /* scheduler sram base address */
1277
1278 unsigned long status;
1279
1280 /* counts mgmt, ctl, and data packets */
1281 struct traffic_stats tx_stats;
1282 struct traffic_stats rx_stats;
1283
1284 /* counts interrupts */
1285 struct isr_stats isr_stats;
1286
1287 struct il_power_mgr power_data;
1288
1289 /* context information */
1290 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1291
1292 /* station table variables */
1293
1294 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1295 spinlock_t sta_lock;
1296 int num_stations;
1297 struct il_station_entry stations[IL_STATION_COUNT];
1298 unsigned long ucode_key_table;
1299
1300 /* queue refcounts */
1301#define IL_MAX_HW_QUEUES 32
1302 unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
1303#define IL_STOP_REASON_PASSIVE 0
1304 unsigned long stop_reason;
1305 /* for each AC */
1306 atomic_t queue_stop_count[4];
1307
1308 /* Indication if ieee80211_ops->open has been called */
1309 u8 is_open;
1310
1311 u8 mac80211_registered;
1312
1313 /* eeprom -- this is in the card's little endian byte order */
1314 u8 *eeprom;
1315 struct il_eeprom_calib_info *calib_info;
1316
1317 enum nl80211_iftype iw_mode;
1318
1319 /* Last Rx'd beacon timestamp */
1320 u64 timestamp;
1321
1322 union {
1323#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
1324 struct {
1325 void *shared_virt;
1326 dma_addr_t shared_phys;
1327
1328 struct delayed_work thermal_periodic;
1329 struct delayed_work rfkill_poll;
1330
1331 struct il3945_notif_stats stats;
1332#ifdef CONFIG_IWLEGACY_DEBUGFS
1333 struct il3945_notif_stats accum_stats;
1334 struct il3945_notif_stats delta_stats;
1335 struct il3945_notif_stats max_delta;
1336#endif
1337
1338 u32 sta_supp_rates;
1339 int last_rx_rssi; /* From Rx packet stats */
1340
1341 /* Rx'd packet timing information */
1342 u32 last_beacon_time;
1343 u64 last_tsf;
1344
1345 /*
1346 * each calibration channel group in the
1347 * EEPROM has a derived clip setting for
1348 * each rate.
1349 */
1350 const struct il3945_clip_group clip_groups[5];
1351
1352 } _3945;
1353#endif
1354#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
1355 struct {
1356 struct il_rx_phy_res last_phy_res;
1357 bool last_phy_res_valid;
1358 u32 ampdu_ref;
1359
1360 struct completion firmware_loading_complete;
1361
1362 /*
1363 * chain noise reset and gain commands are the
1364 * two extra calibration commands follows the standard
1365 * phy calibration commands
1366 */
1367 u8 phy_calib_chain_noise_reset_cmd;
1368 u8 phy_calib_chain_noise_gain_cmd;
1369
1370 u8 key_mapping_keys;
1371 struct il_wep_key wep_keys[WEP_KEYS_MAX];
1372
1373 struct il_notif_stats stats;
1374#ifdef CONFIG_IWLEGACY_DEBUGFS
1375 struct il_notif_stats accum_stats;
1376 struct il_notif_stats delta_stats;
1377 struct il_notif_stats max_delta;
1378#endif
1379
1380 } _4965;
1381#endif
1382 };
1383
1384 struct il_hw_params hw_params;
1385
1386 u32 inta_mask;
1387
1388 struct workqueue_struct *workqueue;
1389
1390 struct work_struct restart;
1391 struct work_struct scan_completed;
1392 struct work_struct rx_replenish;
1393 struct work_struct abort_scan;
1394
1395 bool beacon_enabled;
1396 struct sk_buff *beacon_skb;
1397
1398 struct work_struct tx_flush;
1399
1400 struct tasklet_struct irq_tasklet;
1401
1402 struct delayed_work init_alive_start;
1403 struct delayed_work alive_start;
1404 struct delayed_work scan_check;
1405
1406 /* TX Power */
1407 s8 tx_power_user_lmt;
1408 s8 tx_power_device_lmt;
1409 s8 tx_power_next;
1410
1411#ifdef CONFIG_IWLEGACY_DEBUG
1412 /* debugging info */
1413 u32 debug_level; /* per device debugging will override global
1414 il_debug_level if set */
1415#endif /* CONFIG_IWLEGACY_DEBUG */
1416#ifdef CONFIG_IWLEGACY_DEBUGFS
1417 /* debugfs */
1418 u16 tx_traffic_idx;
1419 u16 rx_traffic_idx;
1420 u8 *tx_traffic;
1421 u8 *rx_traffic;
1422 struct dentry *debugfs_dir;
1423 u32 dbgfs_sram_offset, dbgfs_sram_len;
1424 bool disable_ht40;
1425#endif /* CONFIG_IWLEGACY_DEBUGFS */
1426
1427 struct work_struct txpower_work;
1428 bool disable_sens_cal;
1429 bool disable_chain_noise_cal;
1430 bool disable_tx_power_cal;
1431 struct work_struct run_time_calib_work;
1432 struct timer_list stats_periodic;
1433 struct timer_list watchdog;
1434 bool hw_ready;
1435
1436 struct led_classdev led;
1437 unsigned long blink_on, blink_off;
1438 bool led_registered;
1439}; /*il_priv */
1440
1441static inline void
1442il_txq_ctx_activate(struct il_priv *il, int txq_id)
1443{
1444 set_bit(txq_id, &il->txq_ctx_active_msk);
1445}
1446
1447static inline void
1448il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
1449{
1450 clear_bit(txq_id, &il->txq_ctx_active_msk);
1451}
1452
1453static inline int
1454il_is_associated(struct il_priv *il)
1455{
1456 return (il->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1457}
1458
1459static inline int
1460il_is_any_associated(struct il_priv *il)
1461{
1462 return il_is_associated(il);
1463}
1464
1465static inline int
1466il_is_channel_valid(const struct il_channel_info *ch_info)
1467{
1468 if (ch_info == NULL)
1469 return 0;
1470 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1471}
1472
1473static inline int
1474il_is_channel_radar(const struct il_channel_info *ch_info)
1475{
1476 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1477}
1478
1479static inline u8
1480il_is_channel_a_band(const struct il_channel_info *ch_info)
1481{
1482 return ch_info->band == IEEE80211_BAND_5GHZ;
1483}
1484
1485static inline int
1486il_is_channel_passive(const struct il_channel_info *ch)
1487{
1488 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1489}
1490
1491static inline int
1492il_is_channel_ibss(const struct il_channel_info *ch)
1493{
1494 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1495}
1496
1497static inline void
1498__il_free_pages(struct il_priv *il, struct page *page)
1499{
1500 __free_pages(page, il->hw_params.rx_page_order);
1501 il->alloc_rxb_page--;
1502}
1503
1504static inline void
1505il_free_pages(struct il_priv *il, unsigned long page)
1506{
1507 free_pages(page, il->hw_params.rx_page_order);
1508 il->alloc_rxb_page--;
1509}
1510
1511#define IWLWIFI_VERSION "in-tree:"
1512#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
1513#define DRV_AUTHOR "<ilw@linux.intel.com>"
1514
1515#define IL_PCI_DEVICE(dev, subdev, cfg) \
1516 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
1517 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
1518 .driver_data = (kernel_ulong_t)&(cfg)
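/*
 * Illustrative sketch (annotation, not part of this file): IL_PCI_DEVICE
 * fills one struct pci_device_id entry. A device-family driver would use it
 * roughly as below; the device IDs and the il4965_cfg name are assumptions
 * shown only to demonstrate the macro.
 */
static const struct pci_device_id il_example_hw_card_ids[] = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}
};
MODULE_DEVICE_TABLE(pci, il_example_hw_card_ids);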
1519
1520#define TIME_UNIT 1024
1521
1522#define IL_SKU_G 0x1
1523#define IL_SKU_A 0x2
1524#define IL_SKU_N 0x8
1525
1526#define IL_CMD(x) case x: return #x
1527
1528/* Size of one Rx buffer in host DRAM */
1529#define IL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
1530#define IL_RX_BUF_SIZE_4K (4 * 1024)
1531#define IL_RX_BUF_SIZE_8K (8 * 1024)
1532
1533#ifdef CONFIG_IWLEGACY_DEBUGFS
1534struct il_debugfs_ops {
1535 ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
1536 size_t count, loff_t *ppos);
1537 ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
1538 size_t count, loff_t *ppos);
1539 ssize_t(*general_stats_read) (struct file *file,
1540 char __user *user_buf, size_t count,
1541 loff_t *ppos);
1542};
1543#endif
1544
1545struct il_ops {
1546 /* Handling TX */
1547 void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
1548 struct il_tx_queue *txq,
1549 u16 byte_cnt);
1550 int (*txq_attach_buf_to_tfd) (struct il_priv *il,
1551 struct il_tx_queue *txq, dma_addr_t addr,
1552 u16 len, u8 reset, u8 pad);
1553 void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
1554 int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
1555 /* alive notification after init uCode load */
1556 void (*init_alive_start) (struct il_priv *il);
1557 /* check validity of rtc data address */
1558 int (*is_valid_rtc_data_addr) (u32 addr);
1559 /* 1st ucode load */
1560 int (*load_ucode) (struct il_priv *il);
1561
1562 void (*dump_nic_error_log) (struct il_priv *il);
1563 int (*dump_fh) (struct il_priv *il, char **buf, bool display);
1564 int (*set_channel_switch) (struct il_priv *il,
1565 struct ieee80211_channel_switch *ch_switch);
1566 /* power management */
1567 int (*apm_init) (struct il_priv *il);
1568
1569 /* tx power */
1570 int (*send_tx_power) (struct il_priv *il);
1571 void (*update_chain_flags) (struct il_priv *il);
1572
1573 /* eeprom operations */
1574 int (*eeprom_acquire_semaphore) (struct il_priv *il);
1575 void (*eeprom_release_semaphore) (struct il_priv *il);
1576
1577 int (*rxon_assoc) (struct il_priv *il);
1578 int (*commit_rxon) (struct il_priv *il);
1579 void (*set_rxon_chain) (struct il_priv *il);
1580
1581 u16(*get_hcmd_size) (u8 cmd_id, u16 len);
1582 u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
1583
1584 int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
1585 void (*post_scan) (struct il_priv *il);
1586 void (*post_associate) (struct il_priv *il);
1587 void (*config_ap) (struct il_priv *il);
1588 /* station management */
1589 int (*update_bcast_stations) (struct il_priv *il);
1590 int (*manage_ibss_station) (struct il_priv *il,
1591 struct ieee80211_vif *vif, bool add);
1592
1593 int (*send_led_cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
1594};
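/*
 * Illustrative sketch (annotation, not part of this file): each device
 * family supplies one struct il_ops instance wired to its own helpers and
 * hands it to the core at probe time. All il_example_* names below are
 * hypothetical; il_apm_init() is the common helper declared later in this
 * header.
 */
static const struct il_ops il_example_ops = {
	.txq_init	= il_example_txq_init,
	.load_ucode	= il_example_load_ucode,
	.apm_init	= il_apm_init,
	.commit_rxon	= il_example_commit_rxon,
	.send_led_cmd	= il_example_send_led_cmd,
};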
1595
1596struct il_mod_params {
1597 int sw_crypto; /* def: 0 = using hardware encryption */
1598 int disable_hw_scan; /* def: 0 = use h/w scan */
1599 int num_of_queues; /* def: HW dependent */
1600 int disable_11n; /* def: 0 = 11n capabilities enabled */
1601 int amsdu_size_8K; /* def: 0 = disable 8K amsdu size */
1602 int antenna; /* def: 0 = both antennas (use diversity) */
1603 int restart_fw; /* def: 1 = restart firmware */
1604};
1605
1606#define IL_LED_SOLID 11
1607#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
1608
1609#define IL_LED_ACTIVITY (0<<1)
1610#define IL_LED_LINK (1<<1)
1611
1612/*
1613 * LED mode
1614 * IL_LED_DEFAULT: use device default
1615 * IL_LED_RF_STATE: turn LED on/off based on RF state
1616 * LED ON = RF ON
1617 * LED OFF = RF OFF
1618 * IL_LED_BLINK: adjust led blink rate based on blink table
1619 */
1620enum il_led_mode {
1621 IL_LED_DEFAULT,
1622 IL_LED_RF_STATE,
1623 IL_LED_BLINK,
1624};
1625
1626void il_leds_init(struct il_priv *il);
1627void il_leds_exit(struct il_priv *il);
1628
1629/**
1630 * struct il_cfg
1631 * @fw_name_pre: Firmware filename prefix. The api version and extension
1632 * (.ucode) will be added to filename before loading from disk. The
1633 * filename is constructed as fw_name_pre<api>.ucode.
1634 * @ucode_api_max: Highest version of uCode API supported by driver.
1635 * @ucode_api_min: Lowest version of uCode API supported by driver.
1636	 * @scan_rx_antennas: available antennas for scan operation
1637 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
1638 *
1639 * We enable the driver to be backward compatible wrt API version. The
1640 * driver specifies which APIs it supports (with @ucode_api_max being the
1641 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
1642 * it has a supported API version. The firmware's API version will be
1643 * stored in @il_priv, enabling the driver to make runtime changes based
1644 * on firmware version used.
1645 *
1646 * For example,
1647 * if (IL_UCODE_API(il->ucode_ver) >= 2) {
1648 * Driver interacts with Firmware API version >= 2.
1649 * } else {
1650 * Driver interacts with Firmware API version 1.
1651 * }
1652 *
1653 * The ideal usage of this infrastructure is to treat a new ucode API
1654 * release as a new hardware revision. That is, through utilizing the
1655	 * il_ops callbacks etc. we accommodate different command structures
1656 * and flows between hardware versions as well as their API
1657 * versions.
1658 *
1659 */
1660struct il_cfg {
1661 /* params specific to an individual device within a device family */
1662 const char *name;
1663 const char *fw_name_pre;
1664 const unsigned int ucode_api_max;
1665 const unsigned int ucode_api_min;
1666 u8 valid_tx_ant;
1667 u8 valid_rx_ant;
1668 unsigned int sku;
1669 u16 eeprom_ver;
1670 u16 eeprom_calib_ver;
1671 /* module based parameters which can be set from modprobe cmd */
1672 const struct il_mod_params *mod_params;
1673 /* params not likely to change within a device family */
1674 struct il_base_params *base_params;
1675 /* params likely to change within a device family */
1676 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
1677 enum il_led_mode led_mode;
1678
1679 int eeprom_size;
1680 int num_of_queues; /* def: HW dependent */
1681 int num_of_ampdu_queues; /* def: HW dependent */
1682 /* for il_apm_init() */
1683 u32 pll_cfg_val;
1684 bool set_l0s;
1685 bool use_bsm;
1686
1687 u16 led_compensation;
1688 int chain_noise_num_beacons;
1689 unsigned int wd_timeout;
1690 bool temperature_kelvin;
1691 const bool ucode_tracing;
1692 const bool sensitivity_calib_by_driver;
1693 const bool chain_noise_calib_by_driver;
1694
1695 const u32 regulatory_bands[7];
1696};
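/*
 * Illustrative sketch (annotation, not part of this file): building the
 * firmware filename described in the kernel-doc above and checking that the
 * requested API version falls within [@ucode_api_min, @ucode_api_max]. The
 * function name and the "iwlwifi-4965-" prefix in the comment are
 * assumptions.
 */
static int
il_example_request_firmware(const struct il_cfg *cfg, unsigned int api,
			    struct device *dev, const struct firmware **fw)
{
	char fw_name[64];

	if (api < cfg->ucode_api_min || api > cfg->ucode_api_max)
		return -EINVAL;

	/* e.g. "iwlwifi-4965-" + "2" + ".ucode" -> "iwlwifi-4965-2.ucode" */
	snprintf(fw_name, sizeof(fw_name), "%s%u.ucode", cfg->fw_name_pre, api);
	return request_firmware(fw, fw_name, dev);
}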
1697
1698/***************************
1699 * L i b *
1700 ***************************/
1701
1702int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1703 u16 queue, const struct ieee80211_tx_queue_params *params);
1704int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
1705
1706void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
1707int il_check_rxon_cmd(struct il_priv *il);
1708int il_full_rxon_required(struct il_priv *il);
1709int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
1710void il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
1711 struct ieee80211_vif *vif);
1712u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
1713void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
1714bool il_is_ht40_tx_allowed(struct il_priv *il,
1715 struct ieee80211_sta_ht_cap *ht_cap);
1716void il_connection_init_rx_config(struct il_priv *il);
1717void il_set_rate(struct il_priv *il);
1718int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
1719 u32 decrypt_res, struct ieee80211_rx_status *stats);
1720void il_irq_handle_error(struct il_priv *il);
1721int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1722void il_mac_remove_interface(struct ieee80211_hw *hw,
1723 struct ieee80211_vif *vif);
1724int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1725 enum nl80211_iftype newtype, bool newp2p);
1726void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1727 u32 queues, bool drop);
1728int il_alloc_txq_mem(struct il_priv *il);
1729void il_free_txq_mem(struct il_priv *il);
1730
1731#ifdef CONFIG_IWLEGACY_DEBUGFS
1732void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
1733#else
1734static inline void
1735il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
1736{
1737}
1738#endif
1739
1740/*****************************************************
1741 * Handlers
1742 ***************************************************/
1743void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
1744void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
1745void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
1746void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
1747
1748/*****************************************************
1749* RX
1750******************************************************/
1751void il_cmd_queue_unmap(struct il_priv *il);
1752void il_cmd_queue_free(struct il_priv *il);
1753int il_rx_queue_alloc(struct il_priv *il);
1754void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
1755int il_rx_queue_space(const struct il_rx_queue *q);
1756void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
1757
1758void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
1759void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
1760void il_chswitch_done(struct il_priv *il, bool is_success);
1761
1762/*****************************************************
1763* TX
1764******************************************************/
1765void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
1766int il_tx_queue_init(struct il_priv *il, u32 txq_id);
1767void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
1768void il_tx_queue_unmap(struct il_priv *il, int txq_id);
1769void il_tx_queue_free(struct il_priv *il, int txq_id);
1770void il_setup_watchdog(struct il_priv *il);
1771/*****************************************************
1772 * TX power
1773 ****************************************************/
1774int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
1775
1776/*******************************************************************************
1777 * Rate
1778 ******************************************************************************/
1779
1780u8 il_get_lowest_plcp(struct il_priv *il);
1781
1782/*******************************************************************************
1783 * Scanning
1784 ******************************************************************************/
1785void il_init_scan_params(struct il_priv *il);
1786int il_scan_cancel(struct il_priv *il);
1787int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
1788void il_force_scan_end(struct il_priv *il);
1789int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1790 struct ieee80211_scan_request *hw_req);
1791void il_internal_short_hw_scan(struct il_priv *il);
1792int il_force_reset(struct il_priv *il, bool external);
1793u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1794 const u8 *ta, const u8 *ie, int ie_len, int left);
1795void il_setup_rx_scan_handlers(struct il_priv *il);
1796u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1797 u8 n_probes);
1798u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
1799 struct ieee80211_vif *vif);
1800void il_setup_scan_deferred_work(struct il_priv *il);
1801void il_cancel_scan_deferred_work(struct il_priv *il);
1802
1803/* For faster active scanning, scan will move to the next channel if fewer than
1804 * PLCP_QUIET_THRESH packets are heard on this channel within
1805 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
1806 * time if it's a quiet channel (nothing responded to our probe, and there's
1807 * no other traffic).
1808 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
1809#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
1810#define IL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
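/*
 * Illustrative sketch (annotation, not part of this file): these two values
 * are meant to be copied into the scan host command, roughly as below; the
 * quiet_time/quiet_plcp_th field names follow the C_SCAN command layout and
 * are assumptions here.
 *
 *	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
 *	scan->quiet_time = IL_ACTIVE_QUIET_TIME;
 */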
1811
1812#define IL_SCAN_CHECK_WATCHDOG (HZ * 7)
1813
1814/*****************************************************
1815 * S e n d i n g H o s t C o m m a n d s *
1816 *****************************************************/
1817
1818const char *il_get_cmd_string(u8 cmd);
1819int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
1820int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
1821int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
1822 const void *data);
1823int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
1824 void (*callback) (struct il_priv *il,
1825 struct il_device_cmd *cmd,
1826 struct il_rx_pkt *pkt));
1827
1828int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
1829
1830/*****************************************************
1831 * PCI *
1832 *****************************************************/
1833
1834void il_bg_watchdog(unsigned long data);
1835u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
1836__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
1837 u32 beacon_interval);
1838
1839#ifdef CONFIG_PM_SLEEP
1840extern const struct dev_pm_ops il_pm_ops;
1841
1842#define IL_LEGACY_PM_OPS (&il_pm_ops)
1843
1844#else /* !CONFIG_PM_SLEEP */
1845
1846#define IL_LEGACY_PM_OPS NULL
1847
1848#endif /* !CONFIG_PM_SLEEP */
1849
1850/*****************************************************
1851* Error Handling Debugging
1852******************************************************/
1853void il4965_dump_nic_error_log(struct il_priv *il);
1854#ifdef CONFIG_IWLEGACY_DEBUG
1855void il_print_rx_config_cmd(struct il_priv *il);
1856#else
1857static inline void
1858il_print_rx_config_cmd(struct il_priv *il)
1859{
1860}
1861#endif
1862
1863void il_clear_isr_stats(struct il_priv *il);
1864
1865/*****************************************************
1866* GEOS
1867******************************************************/
1868int il_init_geos(struct il_priv *il);
1869void il_free_geos(struct il_priv *il);
1870
1871/*************** DRIVER STATUS FUNCTIONS *****/
1872
1873#define S_HCMD_ACTIVE 0 /* host command in progress */
1874/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
1875#define S_INT_ENABLED 2
1876#define S_RFKILL 3
1877#define S_CT_KILL 4
1878#define S_INIT 5
1879#define S_ALIVE 6
1880#define S_READY 7
1881#define S_TEMPERATURE 8
1882#define S_GEO_CONFIGURED 9
1883#define S_EXIT_PENDING 10
1884#define S_STATS 12
1885#define S_SCANNING 13
1886#define S_SCAN_ABORTING 14
1887#define S_SCAN_HW 15
1888#define S_POWER_PMI 16
1889#define S_FW_ERROR 17
1890#define S_CHANNEL_SWITCH_PENDING 18
1891
1892static inline int
1893il_is_ready(struct il_priv *il)
1894{
1895 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
1896 * set but EXIT_PENDING is not */
1897 return test_bit(S_READY, &il->status) &&
1898 test_bit(S_GEO_CONFIGURED, &il->status) &&
1899 !test_bit(S_EXIT_PENDING, &il->status);
1900}
1901
1902static inline int
1903il_is_alive(struct il_priv *il)
1904{
1905 return test_bit(S_ALIVE, &il->status);
1906}
1907
1908static inline int
1909il_is_init(struct il_priv *il)
1910{
1911 return test_bit(S_INIT, &il->status);
1912}
1913
1914static inline int
1915il_is_rfkill(struct il_priv *il)
1916{
1917 return test_bit(S_RFKILL, &il->status);
1918}
1919
1920static inline int
1921il_is_ctkill(struct il_priv *il)
1922{
1923 return test_bit(S_CT_KILL, &il->status);
1924}
1925
1926static inline int
1927il_is_ready_rf(struct il_priv *il)
1928{
1929
1930 if (il_is_rfkill(il))
1931 return 0;
1932
1933 return il_is_ready(il);
1934}
1935
1936void il_send_bt_config(struct il_priv *il);
1937int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
1938void il_apm_stop(struct il_priv *il);
1939void _il_apm_stop(struct il_priv *il);
1940
1941int il_apm_init(struct il_priv *il);
1942
1943int il_send_rxon_timing(struct il_priv *il);
1944
1945static inline int
1946il_send_rxon_assoc(struct il_priv *il)
1947{
1948 return il->ops->rxon_assoc(il);
1949}
1950
1951static inline int
1952il_commit_rxon(struct il_priv *il)
1953{
1954 return il->ops->commit_rxon(il);
1955}
1956
1957static inline const struct ieee80211_supported_band *
1958il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
1959{
1960 return il->hw->wiphy->bands[band];
1961}
1962
1963/* mac80211 handlers */
1964int il_mac_config(struct ieee80211_hw *hw, u32 changed);
1965void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1966void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1967 struct ieee80211_bss_conf *bss_conf, u32 changes);
1968void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
1969 __le16 fc, __le32 *tx_flags);
1970
1971irqreturn_t il_isr(int irq, void *data);
1972
1973void il_set_bit(struct il_priv *p, u32 r, u32 m);
1974void il_clear_bit(struct il_priv *p, u32 r, u32 m);
1975bool _il_grab_nic_access(struct il_priv *il);
1976int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
1977int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
1978u32 il_rd_prph(struct il_priv *il, u32 reg);
1979void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
1980u32 il_read_targ_mem(struct il_priv *il, u32 addr);
1981void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
1982
1983static inline bool il_need_reclaim(struct il_priv *il, struct il_rx_pkt *pkt)
1984{
1985 /* Reclaim a command buffer only if this packet is a response
1986 * to a (driver-originated) command. If the packet (e.g. Rx frame)
1987 * originated from uCode, there is no command buffer to reclaim.
1988 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, but
1989 * apparently a few don't get set; catch them here.
1990 */
1991 return !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1992 pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX &&
1993 pkt->hdr.cmd != N_RX_PHY && pkt->hdr.cmd != N_RX &&
1994 pkt->hdr.cmd != N_RX_MPDU && pkt->hdr.cmd != N_COMPRESSED_BA;
1995}
1996
1997static inline void
1998_il_write8(struct il_priv *il, u32 ofs, u8 val)
1999{
2000 writeb(val, il->hw_base + ofs);
2001}
2002#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
2003
2004static inline void
2005_il_wr(struct il_priv *il, u32 ofs, u32 val)
2006{
2007 writel(val, il->hw_base + ofs);
2008}
2009
2010static inline u32
2011_il_rd(struct il_priv *il, u32 ofs)
2012{
2013 return readl(il->hw_base + ofs);
2014}
2015
2016static inline void
2017_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
2018{
2019 _il_wr(il, reg, _il_rd(il, reg) & ~mask);
2020}
2021
2022static inline void
2023_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
2024{
2025 _il_wr(il, reg, _il_rd(il, reg) | mask);
2026}
2027
2028static inline void
2029_il_release_nic_access(struct il_priv *il)
2030{
2031 _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2032 /*
2033	 * The read-modify-write above reads the CSR_GP_CNTRL register, which
2034	 * flushes any previous writes, but we still want the write that clears
2035	 * the MAC_ACCESS_REQ bit to be performed on the PCI bus before any
2036	 * other writes scheduled on different CPUs (after we drop reg_lock).
2037 */
2038 mmiowb();
2039}
2040
2041static inline u32
2042il_rd(struct il_priv *il, u32 reg)
2043{
2044 u32 value;
2045 unsigned long reg_flags;
2046
2047 spin_lock_irqsave(&il->reg_lock, reg_flags);
2048 _il_grab_nic_access(il);
2049 value = _il_rd(il, reg);
2050 _il_release_nic_access(il);
2051 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2052 return value;
2053}
2054
2055static inline void
2056il_wr(struct il_priv *il, u32 reg, u32 value)
2057{
2058 unsigned long reg_flags;
2059
2060 spin_lock_irqsave(&il->reg_lock, reg_flags);
2061 if (likely(_il_grab_nic_access(il))) {
2062 _il_wr(il, reg, value);
2063 _il_release_nic_access(il);
2064 }
2065 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2066}
2067
2068static inline u32
2069_il_rd_prph(struct il_priv *il, u32 reg)
2070{
2071 _il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
2072 return _il_rd(il, HBUS_TARG_PRPH_RDAT);
2073}
2074
2075static inline void
2076_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
2077{
2078 _il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
2079 _il_wr(il, HBUS_TARG_PRPH_WDAT, val);
2080}
2081
2082static inline void
2083il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
2084{
2085 unsigned long reg_flags;
2086
2087 spin_lock_irqsave(&il->reg_lock, reg_flags);
2088 if (likely(_il_grab_nic_access(il))) {
2089 _il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask));
2090 _il_release_nic_access(il);
2091 }
2092 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2093}
2094
2095static inline void
2096il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
2097{
2098 unsigned long reg_flags;
2099
2100 spin_lock_irqsave(&il->reg_lock, reg_flags);
2101 if (likely(_il_grab_nic_access(il))) {
2102 _il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits));
2103 _il_release_nic_access(il);
2104 }
2105 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2106}
2107
2108static inline void
2109il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
2110{
2111 unsigned long reg_flags;
2112 u32 val;
2113
2114 spin_lock_irqsave(&il->reg_lock, reg_flags);
2115 if (likely(_il_grab_nic_access(il))) {
2116 val = _il_rd_prph(il, reg);
2117 _il_wr_prph(il, reg, (val & ~mask));
2118 _il_release_nic_access(il);
2119 }
2120 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2121}
2122
2123#define HW_KEY_DYNAMIC 0
2124#define HW_KEY_DEFAULT 1
2125
2126#define IL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
2127#define IL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
2128#define IL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
2129 being activated */
2130#define IL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
2131 (this is for the IBSS BSSID stations) */
2132#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */
2133
2134void il_restore_stations(struct il_priv *il);
2135void il_clear_ucode_stations(struct il_priv *il);
2136void il_dealloc_bcast_stations(struct il_priv *il);
2137int il_get_free_ucode_key_idx(struct il_priv *il);
2138int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
2139int il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
2140 struct ieee80211_sta *sta, u8 *sta_id_r);
2141int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
2142int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2143 struct ieee80211_sta *sta);
2144
2145u8 il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
2146 struct ieee80211_sta *sta);
2147
2148int il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
2149 u8 flags, bool init);
2150
2151/**
2152 * il_clear_driver_stations - clear knowledge of all stations from driver
2153 * @il: iwl il struct
2154 *
2155 * This is called during il_down() to make sure that in the case
2156 * we're coming there from a hardware restart mac80211 will be
2157 * able to reconfigure stations -- if we're getting there in the
2158 * normal down flow then the stations will already be cleared.
2159 */
2160static inline void
2161il_clear_driver_stations(struct il_priv *il)
2162{
2163 unsigned long flags;
2164
2165 spin_lock_irqsave(&il->sta_lock, flags);
2166 memset(il->stations, 0, sizeof(il->stations));
2167 il->num_stations = 0;
2168 il->ucode_key_table = 0;
2169 spin_unlock_irqrestore(&il->sta_lock, flags);
2170}
2171
2172static inline int
2173il_sta_id(struct ieee80211_sta *sta)
2174{
2175 if (WARN_ON(!sta))
2176 return IL_INVALID_STATION;
2177
2178 return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
2179}
2180
2181/**
2182 * il_sta_id_or_broadcast - return sta_id or broadcast sta
2183 * @il: iwl il
2184 * @context: the current context
2185 * @sta: mac80211 station
2186 *
2187 * In certain circumstances mac80211 passes a station pointer
2188 * that may be %NULL, for example during TX or key setup. In
2189 * that case, we need to use the broadcast station, so this
2190 * inline wraps that pattern.
2191 */
2192static inline int
2193il_sta_id_or_broadcast(struct il_priv *il, struct ieee80211_sta *sta)
2194{
2195 int sta_id;
2196
2197 if (!sta)
2198 return il->hw_params.bcast_id;
2199
2200 sta_id = il_sta_id(sta);
2201
2202 /*
2203 * mac80211 should not be passing a partially
2204 * initialised station!
2205 */
2206 WARN_ON(sta_id == IL_INVALID_STATION);
2207
2208 return sta_id;
2209}
2210
2211/**
2212 * il_queue_inc_wrap - increment queue idx, wrap back to beginning
2213 * @idx -- current idx
2214 * @n_bd -- total number of entries in queue (must be power of 2)
2215 */
2216static inline int
2217il_queue_inc_wrap(int idx, int n_bd)
2218{
2219 return ++idx & (n_bd - 1);
2220}
2221
2222/**
2223 * il_queue_dec_wrap - decrement queue idx, wrap back to end
2224 * @idx -- current idx
2225 * @n_bd -- total number of entries in queue (must be power of 2)
2226 */
2227static inline int
2228il_queue_dec_wrap(int idx, int n_bd)
2229{
2230 return --idx & (n_bd - 1);
2231}
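/*
 * Worked example (annotation): with n_bd = 256, il_queue_inc_wrap(255, 256)
 * returns 0 and il_queue_dec_wrap(0, 256) returns 255; the power-of-2 mask
 * (n_bd - 1) performs the wrap without a branch.
 */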
2232
2233/* TODO: Move fw_desc functions to iwl-pci.ko */
2234static inline void
2235il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
2236{
2237 if (desc->v_addr)
2238 dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
2239 desc->p_addr);
2240 desc->v_addr = NULL;
2241 desc->len = 0;
2242}
2243
2244static inline int
2245il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
2246{
2247 if (!desc->len) {
2248 desc->v_addr = NULL;
2249 return -EINVAL;
2250 }
2251
2252 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
2253 &desc->p_addr, GFP_KERNEL);
2254 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
2255}
2256
2257/*
2258 * we have 8 bits used like this:
2259 *
2260 * 7 6 5 4 3 2 1 0
2261 * | | | | | | | |
2262 * | | | | | | +-+-------- AC queue (0-3)
2263 * | | | | | |
2264 * | +-+-+-+-+------------ HW queue ID
2265 * |
2266 * +---------------------- unused
2267 */
2268static inline void
2269il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
2270{
2271 BUG_ON(ac > 3); /* only have 2 bits */
2272 BUG_ON(hwq > 31); /* only use 5 bits */
2273
2274 txq->swq_id = (hwq << 2) | ac;
2275}
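/*
 * Worked example (annotation): il_set_swq_id(txq, 2, 5) stores
 * (5 << 2) | 2 = 0x16; il_wake_queue()/il_stop_queue() below decode it back
 * into ac = 0x16 & 3 = 2 and hwq = (0x16 >> 2) & 0x1f = 5.
 */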
2276
2277static inline void
2278_il_wake_queue(struct il_priv *il, u8 ac)
2279{
2280 if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
2281 ieee80211_wake_queue(il->hw, ac);
2282}
2283
2284static inline void
2285_il_stop_queue(struct il_priv *il, u8 ac)
2286{
2287 if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
2288 ieee80211_stop_queue(il->hw, ac);
2289}
2290static inline void
2291il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
2292{
2293 u8 queue = txq->swq_id;
2294 u8 ac = queue & 3;
2295 u8 hwq = (queue >> 2) & 0x1f;
2296
2297 if (test_and_clear_bit(hwq, il->queue_stopped))
2298 _il_wake_queue(il, ac);
2299}
2300
2301static inline void
2302il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
2303{
2304 u8 queue = txq->swq_id;
2305 u8 ac = queue & 3;
2306 u8 hwq = (queue >> 2) & 0x1f;
2307
2308 if (!test_and_set_bit(hwq, il->queue_stopped))
2309 _il_stop_queue(il, ac);
2310}
2311
2312static inline void
2313il_wake_queues_by_reason(struct il_priv *il, int reason)
2314{
2315 u8 ac;
2316
2317 if (test_and_clear_bit(reason, &il->stop_reason))
2318 for (ac = 0; ac < 4; ac++)
2319 _il_wake_queue(il, ac);
2320}
2321
2322static inline void
2323il_stop_queues_by_reason(struct il_priv *il, int reason)
2324{
2325 u8 ac;
2326
2327 if (!test_and_set_bit(reason, &il->stop_reason))
2328 for (ac = 0; ac < 4; ac++)
2329 _il_stop_queue(il, ac);
2330}
2331
2332#ifdef ieee80211_stop_queue
2333#undef ieee80211_stop_queue
2334#endif
2335
2336#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
2337
2338#ifdef ieee80211_wake_queue
2339#undef ieee80211_wake_queue
2340#endif
2341
2342#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
2343
2344static inline void
2345il_disable_interrupts(struct il_priv *il)
2346{
2347 clear_bit(S_INT_ENABLED, &il->status);
2348
2349 /* disable interrupts from uCode/NIC to host */
2350 _il_wr(il, CSR_INT_MASK, 0x00000000);
2351
2352 /* acknowledge/clear/reset any interrupts still pending
2353 * from uCode or flow handler (Rx/Tx DMA) */
2354 _il_wr(il, CSR_INT, 0xffffffff);
2355 _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
2356}
2357
2358static inline void
2359il_enable_rfkill_int(struct il_priv *il)
2360{
2361 _il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
2362}
2363
2364static inline void
2365il_enable_interrupts(struct il_priv *il)
2366{
2367 set_bit(S_INT_ENABLED, &il->status);
2368 _il_wr(il, CSR_INT_MASK, il->inta_mask);
2369}
2370
2371/**
2372 * il_beacon_time_mask_low - mask of lower 32 bit of beacon time
2373 * @il -- pointer to il_priv data structure
2374	 * @tsf_bits -- number of bits to shift for masking
2375 */
2376static inline u32
2377il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
2378{
2379 return (1 << tsf_bits) - 1;
2380}
2381
2382/**
2383 * il_beacon_time_mask_high - mask of higher 32 bit of beacon time
2384 * @il -- pointer to il_priv data structure
2385	 * @tsf_bits -- number of bits to shift for masking
2386 */
2387static inline u32
2388il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
2389{
2390 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
2391}
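/*
 * Worked example (annotation): with a hypothetical tsf_bits of 22,
 * il_beacon_time_mask_low() yields (1 << 22) - 1 = 0x003FFFFF and
 * il_beacon_time_mask_high() yields ((1 << 10) - 1) << 22 = 0xFFC00000;
 * together they cover the full 32-bit beacon-time word.
 */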
2392
2393/**
2394	 * struct il_rb_status - receive buffer status, host memory mapped FH registers
2395 *
2396 * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
2397 * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
2398 * @finished_rb_num [0:11] - Indicates the idx of the current RB
2399 * in which the last frame was written to
2400 * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
2401 * which was transferred
2402 */
2403struct il_rb_status {
2404 __le16 closed_rb_num;
2405 __le16 closed_fr_num;
2406 __le16 finished_rb_num;
2407 __le16 finished_fr_nam;
2408 __le32 __unused; /* 3945 only */
2409} __packed;
2410
2411#define TFD_QUEUE_SIZE_MAX 256
2412#define TFD_QUEUE_SIZE_BC_DUP 64
2413#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
2414#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
2415#define IL_NUM_OF_TBS 20
2416
2417static inline u8
2418il_get_dma_hi_addr(dma_addr_t addr)
2419{
2420 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
2421}
2422
2423/**
2424 * struct il_tfd_tb transmit buffer descriptor within transmit frame descriptor
2425 *
2426	 * This structure contains the dma address and length of one transmit buffer
2427	 *
2428	 * @lo: low [31:0] portion of the TX buffer's dma address; because TBs are
2429	 *	packed, every second entry is aligned only on a 16 bit boundary
2430 * @hi_n_len: 0-3 [35:32] portion of dma
2431 * 4-15 length of the tx buffer
2432 */
2433struct il_tfd_tb {
2434 __le32 lo;
2435 __le16 hi_n_len;
2436} __packed;
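/*
 * Illustrative sketch (annotation, not part of this file): filling one TB
 * from a DMA address and length, using il_get_dma_hi_addr() above for bits
 * [35:32]. The helper name is an assumption; put_unaligned_le32() is used
 * because, as noted above, the lo field is not always 32-bit aligned.
 */
static inline void
il_example_tfd_set_tb(struct il_tfd_tb *tb, dma_addr_t addr, u16 len)
{
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= il_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);
}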
2437
2438/**
2439 * struct il_tfd
2440 *
2441 * Transmit Frame Descriptor (TFD)
2442 *
2443 * @ __reserved1[3] reserved
2444 * @ num_tbs 0-4 number of active tbs
2445 * 5 reserved
2446 * 6-7 padding (not used)
2447 * @ tbs[20] transmit frame buffer descriptors
2448 * @ __pad padding
2449 *
2450 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
2451 * Both driver and device share these circular buffers, each of which must be
2452 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
2453 *
2454 * Driver must indicate the physical address of the base of each
2455 * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
2456 *
2457 * Each TFD contains pointer/size information for up to 20 data buffers
2458 * in host DRAM. These buffers collectively contain the (one) frame described
2459 * by the TFD. Each buffer must be a single contiguous block of memory within
2460 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
2461	 * of (4K - 4). The device concatenates all of a TFD's buffers into a single
2462 * Tx frame, up to 8 KBytes in size.
2463 *
2464 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
2465 */
2466struct il_tfd {
2467 u8 __reserved1[3];
2468 u8 num_tbs;
2469 struct il_tfd_tb tbs[IL_NUM_OF_TBS];
2470 __le32 __pad;
2471} __packed;
2472/* PCI registers */
2473#define PCI_CFG_RETRY_TIMEOUT 0x041
2474
2475struct il_rate_info {
2476 u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
2477 u8 plcp_siso; /* uCode API: RATE_SISO_6M_PLCP, etc. */
2478 u8 plcp_mimo2; /* uCode API: RATE_MIMO2_6M_PLCP, etc. */
2479 u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
2480 u8 prev_ieee; /* previous rate in IEEE speeds */
2481 u8 next_ieee; /* next rate in IEEE speeds */
2482 u8 prev_rs; /* previous rate used in rs algo */
2483 u8 next_rs; /* next rate used in rs algo */
2484 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
2485 u8 next_rs_tgg; /* next rate used in TGG rs algo */
2486};
2487
2488struct il3945_rate_info {
2489 u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
2490 u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
2491 u8 prev_ieee; /* previous rate in IEEE speeds */
2492 u8 next_ieee; /* next rate in IEEE speeds */
2493 u8 prev_rs; /* previous rate used in rs algo */
2494 u8 next_rs; /* next rate used in rs algo */
2495 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
2496 u8 next_rs_tgg; /* next rate used in TGG rs algo */
2497 u8 table_rs_idx; /* idx in rate scale table cmd */
2498 u8 prev_table_rs; /* prev in rate table cmd */
2499};
2500
2501/*
2502 * These serve as idxes into
2503 * struct il_rate_info il_rates[RATE_COUNT];
2504 */
2505enum {
2506 RATE_1M_IDX = 0,
2507 RATE_2M_IDX,
2508 RATE_5M_IDX,
2509 RATE_11M_IDX,
2510 RATE_6M_IDX,
2511 RATE_9M_IDX,
2512 RATE_12M_IDX,
2513 RATE_18M_IDX,
2514 RATE_24M_IDX,
2515 RATE_36M_IDX,
2516 RATE_48M_IDX,
2517 RATE_54M_IDX,
2518 RATE_60M_IDX,
2519 RATE_COUNT,
2520 RATE_COUNT_LEGACY = RATE_COUNT - 1, /* Excluding 60M */
2521 RATE_COUNT_3945 = RATE_COUNT - 1,
2522 RATE_INVM_IDX = RATE_COUNT,
2523 RATE_INVALID = RATE_COUNT,
2524};
2525
2526enum {
2527 RATE_6M_IDX_TBL = 0,
2528 RATE_9M_IDX_TBL,
2529 RATE_12M_IDX_TBL,
2530 RATE_18M_IDX_TBL,
2531 RATE_24M_IDX_TBL,
2532 RATE_36M_IDX_TBL,
2533 RATE_48M_IDX_TBL,
2534 RATE_54M_IDX_TBL,
2535 RATE_1M_IDX_TBL,
2536 RATE_2M_IDX_TBL,
2537 RATE_5M_IDX_TBL,
2538 RATE_11M_IDX_TBL,
2539 RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
2540};
2541
2542enum {
2543 IL_FIRST_OFDM_RATE = RATE_6M_IDX,
2544 IL39_LAST_OFDM_RATE = RATE_54M_IDX,
2545 IL_LAST_OFDM_RATE = RATE_60M_IDX,
2546 IL_FIRST_CCK_RATE = RATE_1M_IDX,
2547 IL_LAST_CCK_RATE = RATE_11M_IDX,
2548};
2549
2550/* #define vs. enum to keep from defaulting to 'large integer' */
2551#define RATE_6M_MASK (1 << RATE_6M_IDX)
2552#define RATE_9M_MASK (1 << RATE_9M_IDX)
2553#define RATE_12M_MASK (1 << RATE_12M_IDX)
2554#define RATE_18M_MASK (1 << RATE_18M_IDX)
2555#define RATE_24M_MASK (1 << RATE_24M_IDX)
2556#define RATE_36M_MASK (1 << RATE_36M_IDX)
2557#define RATE_48M_MASK (1 << RATE_48M_IDX)
2558#define RATE_54M_MASK (1 << RATE_54M_IDX)
2559#define RATE_60M_MASK (1 << RATE_60M_IDX)
2560#define RATE_1M_MASK (1 << RATE_1M_IDX)
2561#define RATE_2M_MASK (1 << RATE_2M_IDX)
2562#define RATE_5M_MASK (1 << RATE_5M_IDX)
2563#define RATE_11M_MASK (1 << RATE_11M_IDX)
2564
2565/* uCode API values for legacy bit rates, both OFDM and CCK */
2566enum {
2567 RATE_6M_PLCP = 13,
2568 RATE_9M_PLCP = 15,
2569 RATE_12M_PLCP = 5,
2570 RATE_18M_PLCP = 7,
2571 RATE_24M_PLCP = 9,
2572 RATE_36M_PLCP = 11,
2573 RATE_48M_PLCP = 1,
2574 RATE_54M_PLCP = 3,
2575 RATE_60M_PLCP = 3, /*FIXME:RS:should be removed */
2576 RATE_1M_PLCP = 10,
2577 RATE_2M_PLCP = 20,
2578 RATE_5M_PLCP = 55,
2579 RATE_11M_PLCP = 110,
2580 /*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
2581};
2582
2583/* uCode API values for OFDM high-throughput (HT) bit rates */
2584enum {
2585 RATE_SISO_6M_PLCP = 0,
2586 RATE_SISO_12M_PLCP = 1,
2587 RATE_SISO_18M_PLCP = 2,
2588 RATE_SISO_24M_PLCP = 3,
2589 RATE_SISO_36M_PLCP = 4,
2590 RATE_SISO_48M_PLCP = 5,
2591 RATE_SISO_54M_PLCP = 6,
2592 RATE_SISO_60M_PLCP = 7,
2593 RATE_MIMO2_6M_PLCP = 0x8,
2594 RATE_MIMO2_12M_PLCP = 0x9,
2595 RATE_MIMO2_18M_PLCP = 0xa,
2596 RATE_MIMO2_24M_PLCP = 0xb,
2597 RATE_MIMO2_36M_PLCP = 0xc,
2598 RATE_MIMO2_48M_PLCP = 0xd,
2599 RATE_MIMO2_54M_PLCP = 0xe,
2600 RATE_MIMO2_60M_PLCP = 0xf,
2601 RATE_SISO_INVM_PLCP,
2602 RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
2603};
2604
2605/* MAC header values for bit rates */
2606enum {
2607 RATE_6M_IEEE = 12,
2608 RATE_9M_IEEE = 18,
2609 RATE_12M_IEEE = 24,
2610 RATE_18M_IEEE = 36,
2611 RATE_24M_IEEE = 48,
2612 RATE_36M_IEEE = 72,
2613 RATE_48M_IEEE = 96,
2614 RATE_54M_IEEE = 108,
2615 RATE_60M_IEEE = 120,
2616 RATE_1M_IEEE = 2,
2617 RATE_2M_IEEE = 4,
2618 RATE_5M_IEEE = 11,
2619 RATE_11M_IEEE = 22,
2620};
2621
2622#define IL_CCK_BASIC_RATES_MASK \
2623 (RATE_1M_MASK | \
2624 RATE_2M_MASK)
2625
2626#define IL_CCK_RATES_MASK \
2627 (IL_CCK_BASIC_RATES_MASK | \
2628 RATE_5M_MASK | \
2629 RATE_11M_MASK)
2630
2631#define IL_OFDM_BASIC_RATES_MASK \
2632 (RATE_6M_MASK | \
2633 RATE_12M_MASK | \
2634 RATE_24M_MASK)
2635
2636#define IL_OFDM_RATES_MASK \
2637 (IL_OFDM_BASIC_RATES_MASK | \
2638 RATE_9M_MASK | \
2639 RATE_18M_MASK | \
2640 RATE_36M_MASK | \
2641 RATE_48M_MASK | \
2642 RATE_54M_MASK)
2643
2644#define IL_BASIC_RATES_MASK \
2645 (IL_OFDM_BASIC_RATES_MASK | \
2646 IL_CCK_BASIC_RATES_MASK)
2647
2648#define RATES_MASK ((1 << RATE_COUNT) - 1)
2649#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)
2650
2651#define IL_INVALID_VALUE -1
2652
2653#define IL_MIN_RSSI_VAL -100
2654#define IL_MAX_RSSI_VAL 0
2655
2656/* These values specify how many Tx frame attempts before
2657 * searching for a new modulation mode */
2658#define IL_LEGACY_FAILURE_LIMIT 160
2659#define IL_LEGACY_SUCCESS_LIMIT 480
2660#define IL_LEGACY_TBL_COUNT 160
2661
2662#define IL_NONE_LEGACY_FAILURE_LIMIT 400
2663#define IL_NONE_LEGACY_SUCCESS_LIMIT 4500
2664#define IL_NONE_LEGACY_TBL_COUNT 1500
2665
2666/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
2667#define IL_RS_GOOD_RATIO 12800 /* 100% */
2668#define RATE_SCALE_SWITCH 10880 /* 85% */
2669#define RATE_HIGH_TH 10880 /* 85% */
2670#define RATE_INCREASE_TH 6400 /* 50% */
2671#define RATE_DECREASE_TH 1920 /* 15% */
2672
2673/* possible actions when in legacy mode */
2674#define IL_LEGACY_SWITCH_ANTENNA1 0
2675#define IL_LEGACY_SWITCH_ANTENNA2 1
2676#define IL_LEGACY_SWITCH_SISO 2
2677#define IL_LEGACY_SWITCH_MIMO2_AB 3
2678#define IL_LEGACY_SWITCH_MIMO2_AC 4
2679#define IL_LEGACY_SWITCH_MIMO2_BC 5
2680
2681/* possible actions when in siso mode */
2682#define IL_SISO_SWITCH_ANTENNA1 0
2683#define IL_SISO_SWITCH_ANTENNA2 1
2684#define IL_SISO_SWITCH_MIMO2_AB 2
2685#define IL_SISO_SWITCH_MIMO2_AC 3
2686#define IL_SISO_SWITCH_MIMO2_BC 4
2687#define IL_SISO_SWITCH_GI 5
2688
2689/* possible actions when in mimo mode */
2690#define IL_MIMO2_SWITCH_ANTENNA1 0
2691#define IL_MIMO2_SWITCH_ANTENNA2 1
2692#define IL_MIMO2_SWITCH_SISO_A 2
2693#define IL_MIMO2_SWITCH_SISO_B 3
2694#define IL_MIMO2_SWITCH_SISO_C 4
2695#define IL_MIMO2_SWITCH_GI 5
2696
2697#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI
2698
2699#define IL_ACTION_LIMIT 3 /* # possible actions */
2700
2701#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
2702
2703/* load per tid defines for A-MPDU activation */
2704#define IL_AGG_TPT_THREHOLD 0
2705#define IL_AGG_LOAD_THRESHOLD 10
2706#define IL_AGG_ALL_TID 0xff
2707#define TID_QUEUE_CELL_SPACING 50 /*mS */
2708#define TID_QUEUE_MAX_SIZE 20
2709#define TID_ROUND_VALUE 5 /* mS */
2710#define TID_MAX_LOAD_COUNT 8
2711
2712#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
2713#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
2714
2715extern const struct il_rate_info il_rates[RATE_COUNT];
2716
2717enum il_table_type {
2718 LQ_NONE,
2719 LQ_G, /* legacy types */
2720 LQ_A,
2721 LQ_SISO, /* high-throughput types */
2722 LQ_MIMO2,
2723 LQ_MAX,
2724};
2725
2726#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
2727#define is_siso(tbl) ((tbl) == LQ_SISO)
2728#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
2729#define is_mimo(tbl) (is_mimo2(tbl))
2730#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
2731#define is_a_band(tbl) ((tbl) == LQ_A)
2732#define is_g_and(tbl) ((tbl) == LQ_G)
2733
2734#define ANT_NONE 0x0
2735#define ANT_A BIT(0)
2736#define ANT_B BIT(1)
2737#define ANT_AB (ANT_A | ANT_B)
2738#define ANT_C BIT(2)
2739#define ANT_AC (ANT_A | ANT_C)
2740#define ANT_BC (ANT_B | ANT_C)
2741#define ANT_ABC (ANT_AB | ANT_C)
2742
2743#define IL_MAX_MCS_DISPLAY_SIZE 12
2744
2745struct il_rate_mcs_info {
2746 char mbps[IL_MAX_MCS_DISPLAY_SIZE];
2747 char mcs[IL_MAX_MCS_DISPLAY_SIZE];
2748};
2749
2750/**
2751 * struct il_rate_scale_data -- tx success history for one rate
2752 */
2753struct il_rate_scale_data {
2754 u64 data; /* bitmap of successful frames */
2755 s32 success_counter; /* number of frames successful */
2756 s32 success_ratio; /* per-cent * 128 */
2757 s32 counter; /* number of frames attempted */
2758 s32 average_tpt; /* success ratio * expected throughput */
2759 unsigned long stamp;
2760};
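/*
 * Illustrative sketch (annotation, not part of this file): how the rate
 * scaling code is expected to derive success_ratio from one window, so that
 * 100% maps to IL_RS_GOOD_RATIO (12800) and can be compared against the
 * RATE_*_TH thresholds above. The function name is an assumption.
 */
static inline s32
il_example_success_ratio(const struct il_rate_scale_data *win)
{
	if (win->counter <= 0)
		return IL_INVALID_VALUE;	/* -1: no history yet */

	return 128 * (100 * win->success_counter) / win->counter;
}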
2761
2762/**
2763 * struct il_scale_tbl_info -- tx params and success history for all rates
2764 *
2765 * There are two of these in struct il_lq_sta,
2766 * one for "active", and one for "search".
2767 */
2768struct il_scale_tbl_info {
2769 enum il_table_type lq_type;
2770 u8 ant_type;
2771 u8 is_SGI; /* 1 = short guard interval */
2772 u8 is_ht40; /* 1 = 40 MHz channel width */
2773 u8 is_dup; /* 1 = duplicated data streams */
2774 u8 action; /* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
2775	u8 max_search;	/* maximum number of tables we can search */
2776 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
2777 u32 current_rate; /* rate_n_flags, uCode API format */
2778 struct il_rate_scale_data win[RATE_COUNT]; /* rate histories */
2779};
2780
2781struct il_traffic_load {
2782 unsigned long time_stamp; /* age of the oldest stats */
2783 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
2784 * slice */
2785 u32 total; /* total num of packets during the
2786 * last TID_MAX_TIME_DIFF */
2787	u8 queue_count;	/* number of queues that have
2788 * been used since the last cleanup */
2789 u8 head; /* start of the circular buffer */
2790};
2791
2792/**
2793	 * struct il_lq_sta -- driver's rate scaling private structure
2794 *
2795 * Pointer to this gets passed back and forth between driver and mac80211.
2796 */
2797struct il_lq_sta {
2798 u8 active_tbl; /* idx of active table, range 0-1 */
2799 u8 enable_counter; /* indicates HT mode */
2800 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
2801 u8 search_better_tbl; /* 1: currently trying alternate mode */
2802 s32 last_tpt;
2803
2804 /* The following determine when to search for a new mode */
2805 u32 table_count_limit;
2806 u32 max_failure_limit; /* # failed frames before new search */
2807 u32 max_success_limit; /* # successful frames before new search */
2808 u32 table_count;
2809 u32 total_failed; /* total failed frames, any/all rates */
2810 u32 total_success; /* total successful frames, any/all rates */
2811 u64 flush_timer; /* time staying in mode before new search */
2812
2813 u8 action_counter; /* # mode-switch actions tried */
2814 u8 is_green;
2815 u8 is_dup;
2816 enum ieee80211_band band;
2817
2818 /* The following are bitmaps of rates; RATE_6M_MASK, etc. */
2819 u32 supp_rates;
2820 u16 active_legacy_rate;
2821 u16 active_siso_rate;
2822 u16 active_mimo2_rate;
2823 s8 max_rate_idx; /* Max rate set by user */
2824 u8 missed_rate_counter;
2825
2826 struct il_link_quality_cmd lq;
2827 struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
2828 struct il_traffic_load load[TID_MAX_LOAD_COUNT];
2829 u8 tx_agg_tid_en;
2830#ifdef CONFIG_MAC80211_DEBUGFS
2831 struct dentry *rs_sta_dbgfs_scale_table_file;
2832 struct dentry *rs_sta_dbgfs_stats_table_file;
2833 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
2834 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
2835 u32 dbg_fixed_rate;
2836#endif
2837 struct il_priv *drv;
2838
2839 /* used to be in sta_info */
2840 int last_txrate_idx;
2841 /* last tx rate_n_flags */
2842 u32 last_rate_n_flags;
2843 /* packets destined for this STA are aggregated */
2844 u8 is_agg;
2845};
2846
2847/*
2848	 * il_station_priv: Driver's private station information
2849 *
2850 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
2851	 * in the structure for use by the driver. This structure is placed in that
2852 * space.
2853 *
2854 * The common struct MUST be first because it is shared between
2855 * 3945 and 4965!
2856 */
2857struct il_station_priv {
2858 struct il_station_priv_common common;
2859 struct il_lq_sta lq_sta;
2860 atomic_t pending_frames;
2861 bool client;
2862 bool asleep;
2863};
2864
2865static inline u8
2866il4965_num_of_ant(u8 m)
2867{
2868 return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
2869}
2870
2871static inline u8
2872il4965_first_antenna(u8 mask)
2873{
2874 if (mask & ANT_A)
2875 return ANT_A;
2876 if (mask & ANT_B)
2877 return ANT_B;
2878 return ANT_C;
2879}
2880
2881/**
2882 * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
2883 *
2884 * The specific throughput table used is based on the type of network
2885	 * the station is associated with, including A, B, G, and G w/ TGG protection
2886 */
2887void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
2888
2889/* Initialize station's rate scaling information after adding station */
2890void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
2891 u8 sta_id);
2892void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
2893 u8 sta_id);
2894
2895/**
2896 * il_rate_control_register - Register the rate control algorithm callbacks
2897 *
2898	 * Since the rate control algorithm is hardware-specific, there is no need
2899	 * or reason to place it as a stand-alone module. The driver can call
2900 * il_rate_control_register in order to register the rate control callbacks
2901 * with the mac80211 subsystem. This should be performed prior to calling
2902 * ieee80211_register_hw
2903 *
2904 */
2905int il4965_rate_control_register(void);
2906int il3945_rate_control_register(void);
2907
2908/**
2909 * il_rate_control_unregister - Unregister the rate control callbacks
2910 *
2911 * This should be called after calling ieee80211_unregister_hw, but before
2912 * the driver is unloaded.
2913 */
2914void il4965_rate_control_unregister(void);
2915void il3945_rate_control_unregister(void);
2916
2917int il_power_update_mode(struct il_priv *il, bool force);
2918void il_power_initialize(struct il_priv *il);
2919
2920extern u32 il_debug_level;
2921
2922#ifdef CONFIG_IWLEGACY_DEBUG
2923/*
2924 * il_get_debug_level: Return active debug level for device
2925 *
2926	 * Using sysfs it is possible to set a per-device debug level. This debug
2927 * level will be used if set, otherwise the global debug level which can be
2928 * set via module parameter is used.
2929 */
2930static inline u32
2931il_get_debug_level(struct il_priv *il)
2932{
2933 if (il->debug_level)
2934 return il->debug_level;
2935 else
2936 return il_debug_level;
2937}
2938#else
2939static inline u32
2940il_get_debug_level(struct il_priv *il)
2941{
2942 return il_debug_level;
2943}
2944#endif
2945
2946#define il_print_hex_error(il, p, len) \
2947do { \
2948 print_hex_dump(KERN_ERR, "iwl data: ", \
2949 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
2950} while (0)
2951
2952#ifdef CONFIG_IWLEGACY_DEBUG
2953#define IL_DBG(level, fmt, args...) \
2954do { \
2955 if (il_get_debug_level(il) & level) \
2956 dev_err(&il->hw->wiphy->dev, "%c %s " fmt, \
2957 in_interrupt() ? 'I' : 'U', __func__ , ##args); \
2958} while (0)
2959
2960#define il_print_hex_dump(il, level, p, len) \
2961do { \
2962 if (il_get_debug_level(il) & level) \
2963 print_hex_dump(KERN_DEBUG, "iwl data: ", \
2964 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
2965} while (0)
2966
2967#else
2968#define IL_DBG(level, fmt, args...)
2969static inline void
2970il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
2971{
2972}
2973#endif /* CONFIG_IWLEGACY_DEBUG */
2974
2975#ifdef CONFIG_IWLEGACY_DEBUGFS
2976int il_dbgfs_register(struct il_priv *il, const char *name);
2977void il_dbgfs_unregister(struct il_priv *il);
2978#else
2979static inline int
2980il_dbgfs_register(struct il_priv *il, const char *name)
2981{
2982 return 0;
2983}
2984
2985static inline void
2986il_dbgfs_unregister(struct il_priv *il)
2987{
2988}
2989#endif /* CONFIG_IWLEGACY_DEBUGFS */
2990
2991/*
2992 * To use the debug system:
2993 *
2994 * If you are defining a new debug classification, simply add it to the #define
2995 * list here in the form of
2996 *
2997 * #define IL_DL_xxxx VALUE
2998 *
2999 * where xxxx should be the name of the classification (for example, WEP).
3000 *
3001 * You then need to either add a IL_xxxx_DEBUG() macro definition for your
3002 * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
3003 * to send output to that classification.
3004 *
3005 * The active debug levels can be accessed via files
3006 *
3007 * /sys/module/iwl4965/parameters/debug
3008 * /sys/module/iwl3945/parameters/debug
3009 * /sys/class/net/wlan0/device/debug_level
3010 *
3011 * when CONFIG_IWLEGACY_DEBUG=y.
3012 */
3013
3014/* 0x0000000F - 0x00000001 */
3015#define IL_DL_INFO (1 << 0)
3016#define IL_DL_MAC80211 (1 << 1)
3017#define IL_DL_HCMD (1 << 2)
3018#define IL_DL_STATE (1 << 3)
3019/* 0x000000F0 - 0x00000010 */
3020#define IL_DL_MACDUMP (1 << 4)
3021#define IL_DL_HCMD_DUMP (1 << 5)
3022#define IL_DL_EEPROM (1 << 6)
3023#define IL_DL_RADIO (1 << 7)
3024/* 0x00000F00 - 0x00000100 */
3025#define IL_DL_POWER (1 << 8)
3026#define IL_DL_TEMP (1 << 9)
3027#define IL_DL_NOTIF (1 << 10)
3028#define IL_DL_SCAN (1 << 11)
3029/* 0x0000F000 - 0x00001000 */
3030#define IL_DL_ASSOC (1 << 12)
3031#define IL_DL_DROP (1 << 13)
3032#define IL_DL_TXPOWER (1 << 14)
3033#define IL_DL_AP (1 << 15)
3034/* 0x000F0000 - 0x00010000 */
3035#define IL_DL_FW (1 << 16)
3036#define IL_DL_RF_KILL (1 << 17)
3037#define IL_DL_FW_ERRORS (1 << 18)
3038#define IL_DL_LED (1 << 19)
3039/* 0x00F00000 - 0x00100000 */
3040#define IL_DL_RATE (1 << 20)
3041#define IL_DL_CALIB (1 << 21)
3042#define IL_DL_WEP (1 << 22)
3043#define IL_DL_TX (1 << 23)
3044/* 0x0F000000 - 0x01000000 */
3045#define IL_DL_RX (1 << 24)
3046#define IL_DL_ISR (1 << 25)
3047#define IL_DL_HT (1 << 26)
3048/* 0xF0000000 - 0x10000000 */
3049#define IL_DL_11H (1 << 28)
3050#define IL_DL_STATS (1 << 29)
3051#define IL_DL_TX_REPLY (1 << 30)
3052#define IL_DL_QOS (1 << 31)
3053
3054#define D_INFO(f, a...) IL_DBG(IL_DL_INFO, f, ## a)
3055#define D_MAC80211(f, a...) IL_DBG(IL_DL_MAC80211, f, ## a)
3056#define D_MACDUMP(f, a...) IL_DBG(IL_DL_MACDUMP, f, ## a)
3057#define D_TEMP(f, a...) IL_DBG(IL_DL_TEMP, f, ## a)
3058#define D_SCAN(f, a...) IL_DBG(IL_DL_SCAN, f, ## a)
3059#define D_RX(f, a...) IL_DBG(IL_DL_RX, f, ## a)
3060#define D_TX(f, a...) IL_DBG(IL_DL_TX, f, ## a)
3061#define D_ISR(f, a...) IL_DBG(IL_DL_ISR, f, ## a)
3062#define D_LED(f, a...) IL_DBG(IL_DL_LED, f, ## a)
3063#define D_WEP(f, a...) IL_DBG(IL_DL_WEP, f, ## a)
3064#define D_HC(f, a...) IL_DBG(IL_DL_HCMD, f, ## a)
3065#define D_HC_DUMP(f, a...) IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
3066#define D_EEPROM(f, a...) IL_DBG(IL_DL_EEPROM, f, ## a)
3067#define D_CALIB(f, a...) IL_DBG(IL_DL_CALIB, f, ## a)
3068#define D_FW(f, a...) IL_DBG(IL_DL_FW, f, ## a)
3069#define D_RF_KILL(f, a...) IL_DBG(IL_DL_RF_KILL, f, ## a)
3070#define D_DROP(f, a...) IL_DBG(IL_DL_DROP, f, ## a)
3071#define D_AP(f, a...) IL_DBG(IL_DL_AP, f, ## a)
3072#define D_TXPOWER(f, a...) IL_DBG(IL_DL_TXPOWER, f, ## a)
3073#define D_RATE(f, a...) IL_DBG(IL_DL_RATE, f, ## a)
3074#define D_NOTIF(f, a...) IL_DBG(IL_DL_NOTIF, f, ## a)
3075#define D_ASSOC(f, a...) IL_DBG(IL_DL_ASSOC, f, ## a)
3076#define D_HT(f, a...) IL_DBG(IL_DL_HT, f, ## a)
3077#define D_STATS(f, a...) IL_DBG(IL_DL_STATS, f, ## a)
3078#define D_TX_REPLY(f, a...) IL_DBG(IL_DL_TX_REPLY, f, ## a)
3079#define D_QOS(f, a...) IL_DBG(IL_DL_QOS, f, ## a)
3080#define D_RADIO(f, a...) IL_DBG(IL_DL_RADIO, f, ## a)
3081#define D_POWER(f, a...) IL_DBG(IL_DL_POWER, f, ## a)
3082#define D_11H(f, a...) IL_DBG(IL_DL_11H, f, ## a)
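/*
 * Illustrative sketch (annotation, not part of this file): adding a new
 * debug classification as described in the comment block above. The
 * IL_DL_EXAMPLE / D_EXAMPLE names are hypothetical; bit 27 is the one slot
 * left unused by the map above.
 *
 * #define IL_DL_EXAMPLE	(1 << 27)
 * #define D_EXAMPLE(f, a...)	IL_DBG(IL_DL_EXAMPLE, f, ## a)
 *
 * usage:	D_EXAMPLE("tx queue %d stalled\n", txq_id);
 */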
3083
3084#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/intel/iwlegacy/csr.h b/drivers/net/wireless/intel/iwlegacy/csr.h
new file mode 100644
index 000000000000..9138e15004fa
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/csr.h
@@ -0,0 +1,419 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __il_csr_h__
64#define __il_csr_h__
65/*
66 * CSR (control and status registers)
67 *
68 * CSR registers are mapped directly into PCI bus space, and are accessible
69 * whenever platform supplies power to device, even when device is in
70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 *
73 * Use _il_wr() and _il_rd() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use il_wr() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers.
79 *
80 * NOTE: Device does need to be awake in order to read this memory
81 * via CSR_EEPROM register
82 */
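A minimal sketch of the direct access pattern the comment describes, illustrative only and not part of the patch; _il_rd() is assumed to be the plain PCI read helper from common.h.

static u32 __maybe_unused example_read_hw_rev(struct il_priv *il)
{
	/* plain PCI read; no MAC_ACCESS_REQ handshake is needed for CSR */
	return _il_rd(il, CSR_HW_REV);
}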
83#define CSR_BASE (0x000)
84
85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
92#define CSR_GP_CNTRL (CSR_BASE+0x024)
93
94/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
96
97/*
98 * Hardware revision info
99 * Bit fields:
100 * 31-8: Reserved
101 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
103 * 1-0: "Dash" (-) value, as in A-1, etc.
104 *
105 * NOTE: Revision step affects calculation of CCK txpower for 4965.
106 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
107 */
108#define CSR_HW_REV (CSR_BASE+0x028)
109
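For illustration only (not part of the patch), unpacking the bit fields documented above from a raw CSR_HW_REV value:

static void __maybe_unused example_decode_hw_rev(u32 hw_rev)
{
	u8 type = (hw_rev >> 4) & 0xf;	/* CSR_HW_REV_TYPE_xxx */
	u8 step = (hw_rev >> 2) & 0x3;	/* 0 = A, 1 = B, 2 = C, 3 = D */
	u8 dash = hw_rev & 0x3;		/* as in A-1 */

	(void)type; (void)step; (void)dash;
}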
110/*
111 * EEPROM memory reads
112 *
113 * NOTE: Device must be awake, initialized via apm_ops.init(),
114 * in order to read.
115 */
116#define CSR_EEPROM_REG (CSR_BASE+0x02c)
117#define CSR_EEPROM_GP (CSR_BASE+0x030)
118
119#define CSR_GIO_REG (CSR_BASE+0x03C)
120#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
121#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
122
123/*
124 * UCODE-DRIVER GP (general purpose) mailbox registers.
125 * SET/CLR registers set/clear bit(s) if "1" is written.
126 */
127#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
128#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
129#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
130#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
131
132#define CSR_LED_REG (CSR_BASE+0x094)
133#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
134
135/* GIO Chicken Bits (PCI Express bus link power management) */
136#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
137
138/* Analog phase-lock-loop configuration */
139#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
140
141/*
142 * CSR Hardware Revision Workaround Register. Indicates hardware rev;
143 * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
144 * See also CSR_HW_REV register.
145 * Bit fields:
146 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
147 * 1-0: "Dash" (-) value, as in C-1, etc.
148 */
149#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
150
151#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
152#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
153
154/* Bits for CSR_HW_IF_CONFIG_REG */
155#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
156#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
157#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
158#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
159
160#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
161#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
162#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
163#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
164#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
165#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
166
167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
172
173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
175
176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
177 * acknowledged (reset) by host writing "1" to flagged bits. */
178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
189
190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
191 CSR_INT_BIT_HW_ERR | \
192 CSR_INT_BIT_FH_TX | \
193 CSR_INT_BIT_SW_ERR | \
194 CSR_INT_BIT_RF_KILL | \
195 CSR_INT_BIT_SW_RX | \
196 CSR_INT_BIT_WAKEUP | \
197 CSR_INT_BIT_ALIVE)
198
199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
208
209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
210 CSR39_FH_INT_BIT_RX_CHNL2 | \
211 CSR_FH_INT_BIT_RX_CHNL1 | \
212 CSR_FH_INT_BIT_RX_CHNL0)
213
214#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
215 CSR_FH_INT_BIT_TX_CHNL1 | \
216 CSR_FH_INT_BIT_TX_CHNL0)
217
218#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
219 CSR_FH_INT_BIT_RX_CHNL1 | \
220 CSR_FH_INT_BIT_RX_CHNL0)
221
222#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
223 CSR_FH_INT_BIT_TX_CHNL0)
224
225/* GPIO */
226#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
227#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
228#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
229
230/* RESET */
231#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
232#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
233#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
234#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
235#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
236#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
237
238/*
239 * GP (general purpose) CONTROL REGISTER
240 * Bit fields:
241 * 27: HW_RF_KILL_SW
242 * Indicates state of (platform's) hardware RF-Kill switch
243 * 26-24: POWER_SAVE_TYPE
244 * Indicates current power-saving mode:
245 * 000 -- No power saving
246 * 001 -- MAC power-down
247 * 010 -- PHY (radio) power-down
248 * 011 -- Error
249 * 9-6: SYS_CONFIG
250 * Indicates current system configuration, reflecting pins on chip
251 * as forced high/low by device circuit board.
252 * 4: GOING_TO_SLEEP
253 * Indicates MAC is entering a power-saving sleep power-down.
254 * Not a good time to access device-internal resources.
255 * 3: MAC_ACCESS_REQ
256 * Host sets this to request and maintain MAC wakeup, to allow host
257 * access to device-internal resources. Host must wait for
258 * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
259 * device registers.
260 * 2: INIT_DONE
261 * Host sets this to put device into fully operational D0 power mode.
262 * Host resets this after SW_RESET to put device into low power mode.
263 * 0: MAC_CLOCK_READY
264 * Indicates MAC (ucode processor, etc.) is powered up and can run.
265 * Internal resources are accessible.
266 * NOTE: This does not indicate that the processor is actually running.
267 * NOTE: This does not indicate that 4965 or 3945 has completed
268 * init or post-power-down restore of internal SRAM memory.
269 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
270 * SRAM is restored and uCode is in normal operation mode.
271 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
272 * do not need to save/restore it.
273 * NOTE: After device reset, this bit remains "0" until host sets
274 * INIT_DONE
275 */
276#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
277#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
278#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
279#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
280
281#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
282
283#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
284#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
285#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
286
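A hedged sketch of the MAC_ACCESS_REQ handshake described above, illustrative only and not part of the patch; _il_set_bit() and _il_poll_bit() are assumed from common.h, and the real driver wraps this sequence in _il_grab_nic_access().

static int __maybe_unused example_wake_mac(struct il_priv *il)
{
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* wait for MAC_CLOCK_READY and !GOING_TO_SLEEP; timeout in usec */
	return _il_poll_bit(il, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000);
}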
287/* EEPROM REG */
288#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
289#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
290#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
291#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
292
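A hedged sketch of a single 16-bit EEPROM read using the register layout above, illustrative only and not part of the patch; it mirrors what il_eeprom_init() in common.c does, and the poll timeout value is an assumption.

static u16 __maybe_unused example_eeprom_word(struct il_priv *il, u32 addr)
{
	u32 r;

	/* address goes into bits 15:2, data comes back in bits 31:16 */
	_il_wr(il, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
	_il_poll_bit(il, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK,
		     CSR_EEPROM_REG_READ_VALID_MSK, 5000);
	r = _il_rd(il, CSR_EEPROM_REG);
	return r >> 16;
}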
293/* EEPROM GP */
294#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
295#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
296#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
297#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
298
299/* GP REG */
300#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
301#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
302#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
303#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
304#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
305
306/* CSR GIO */
307#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
308
309/*
310 * UCODE-DRIVER GP (general purpose) mailbox register 1
311 * Host driver and uCode write and/or read this register to communicate with
312 * each other.
313 * Bit fields:
314 * 4: UCODE_DISABLE
315 * Host sets this to request permanent halt of uCode, same as
316 * sending CARD_STATE command with "halt" bit set.
317 * 3: CT_KILL_EXIT
318 * Host sets this to request exit from CT_KILL state, i.e. host thinks
319 * device temperature is low enough to continue normal operation.
320 * 2: CMD_BLOCKED
321 * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
322 * to release uCode to clear all Tx and command queues, enter
323 * unassociated mode, and power down.
324 * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
325 * 1: SW_BIT_RFKILL
326 * Host sets this when issuing CARD_STATE command to request
327 * device sleep.
328 * 0: MAC_SLEEP
329 * uCode sets this when preparing a power-saving power-down.
330 * uCode resets this when power-up is complete and SRAM is sane.
331 * NOTE: 3945/4965 saves internal SRAM data to host when powering down,
332 * and must restore this data after powering back up.
333 * MAC_SLEEP is the best indication that restore is complete.
334 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
335 * do not need to save/restore it.
336 */
337#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
338#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
339#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
340#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
341
342/* GIO Chicken Bits (PCI Express bus link power management) */
343#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
344#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
345
346/* LED */
347#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
348#define CSR_LED_REG_TRUN_ON (0x78)
349#define CSR_LED_REG_TRUN_OFF (0x38)
350
351/* ANA_PLL */
352#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
353
354/* HPET MEM debug */
355#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
356
357/* DRAM INT TBL */
358#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
359#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
360
361/*
362 * HBUS (Host-side Bus)
363 *
364 * HBUS registers are mapped directly into PCI bus space, but are used
365 * to indirectly access device's internal memory or registers that
366 * may be powered-down.
367 *
368 * Use il_wr()/il_rd() family
369 * for these registers;
370 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
371 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
372 * internal resources.
373 *
374 * Do not use _il_wr()/_il_rd() family to access these registers;
375 * these provide only simple PCI bus access, without waking up the MAC.
376 */
377#define HBUS_BASE (0x400)
378
379/*
380 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
381 * structures, error log, event log, verifying uCode load).
382 * First write to address register, then read from or write to data register
383 * to complete the job. Once the address register is set up, accesses to
384 * data registers auto-increment the address by one dword.
385 * Bit usage for address registers (read or write):
386 * 0-31: memory address within device
387 */
388#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
389#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
390#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
391#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
392
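A hedged sketch of one SRAM word read through the target-memory window above, illustrative only and not part of the patch; this is essentially what il_read_targ_mem() does, with the register-lock handling omitted for brevity.

static u32 __maybe_unused example_read_sram(struct il_priv *il, u32 addr)
{
	u32 val;

	_il_grab_nic_access(il);	/* MAC must be awake for HBUS access */
	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	val = _il_rd(il, HBUS_TARG_MEM_RDAT);
	_il_release_nic_access(il);

	return val;
}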
393/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
394#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
395#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
396
397/*
398 * Registers for accessing device's internal peripheral registers
399 * (e.g. SCD, BSM, etc.). First write to address register,
400 * then read from or write to data register to complete the job.
401 * Bit usage for address registers (read or write):
402 * 0-15: register address (offset) within device
403 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
404 */
405#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
406#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
407#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
408#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
409
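A hedged sketch of a dword write to an internal peripheral register, illustrative only and not part of the patch; it mirrors _il_wr_prph() in common.h, with locking and the MAC wake handshake omitted.

static void __maybe_unused example_prph_write(struct il_priv *il,
					      u32 reg, u32 val)
{
	/* bits 24-25 carry (# bytes - 1); 3 << 24 selects a full dword */
	_il_wr(il, HBUS_TARG_PRPH_WADDR, (reg & 0x0000FFFF) | (3 << 24));
	_il_wr(il, HBUS_TARG_PRPH_WDAT, val);
}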
410/*
411 * Per-Tx-queue write pointer (idx, really!)
412 * Indicates idx to next TFD that driver will fill (1 past latest filled).
413 * Bit usage:
414 * 0-7: queue write idx
415 * 11-8: queue selector
416 */
417#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
418
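A hedged sketch of advancing a Tx queue's write idx, illustrative only and not part of the patch; it composes the value the same way il_txq_update_write_ptr() does, assuming the il_wr() helper from common.h.

static void __maybe_unused example_bump_wrptr(struct il_priv *il,
					      int txq_id, u32 idx)
{
	/* idx in bits 7:0, queue selector in bits 11:8 */
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
}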
419#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
new file mode 100644
index 000000000000..908b9f4fef6f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -0,0 +1,1426 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <linux/export.h>
30#include <net/mac80211.h>
31
32#include "common.h"
33
34static void
35il_clear_traffic_stats(struct il_priv *il)
36{
37 memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
38 memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
39}
40
41/*
42 * il_update_stats() records all MGMT, CTRL and DATA packets, for both
43 * Tx and Rx. Use debugfs to display the tx/rx stats.
44 */
45void
46il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
47{
48 struct traffic_stats *stats;
49
50 if (is_tx)
51 stats = &il->tx_stats;
52 else
53 stats = &il->rx_stats;
54
55 if (ieee80211_is_mgmt(fc)) {
56 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
57 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
58 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
59 break;
60 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
61 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
62 break;
63 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
64 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
65 break;
66 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
67 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
68 break;
69 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
70 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
71 break;
72 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
73 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
74 break;
75 case cpu_to_le16(IEEE80211_STYPE_BEACON):
76 stats->mgmt[MANAGEMENT_BEACON]++;
77 break;
78 case cpu_to_le16(IEEE80211_STYPE_ATIM):
79 stats->mgmt[MANAGEMENT_ATIM]++;
80 break;
81 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
82 stats->mgmt[MANAGEMENT_DISASSOC]++;
83 break;
84 case cpu_to_le16(IEEE80211_STYPE_AUTH):
85 stats->mgmt[MANAGEMENT_AUTH]++;
86 break;
87 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
88 stats->mgmt[MANAGEMENT_DEAUTH]++;
89 break;
90 case cpu_to_le16(IEEE80211_STYPE_ACTION):
91 stats->mgmt[MANAGEMENT_ACTION]++;
92 break;
93 }
94 } else if (ieee80211_is_ctl(fc)) {
95 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
96 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
97 stats->ctrl[CONTROL_BACK_REQ]++;
98 break;
99 case cpu_to_le16(IEEE80211_STYPE_BACK):
100 stats->ctrl[CONTROL_BACK]++;
101 break;
102 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
103 stats->ctrl[CONTROL_PSPOLL]++;
104 break;
105 case cpu_to_le16(IEEE80211_STYPE_RTS):
106 stats->ctrl[CONTROL_RTS]++;
107 break;
108 case cpu_to_le16(IEEE80211_STYPE_CTS):
109 stats->ctrl[CONTROL_CTS]++;
110 break;
111 case cpu_to_le16(IEEE80211_STYPE_ACK):
112 stats->ctrl[CONTROL_ACK]++;
113 break;
114 case cpu_to_le16(IEEE80211_STYPE_CFEND):
115 stats->ctrl[CONTROL_CFEND]++;
116 break;
117 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
118 stats->ctrl[CONTROL_CFENDACK]++;
119 break;
120 }
121 } else {
122 /* data */
123 stats->data_cnt++;
124 stats->data_bytes += len;
125 }
126}
127EXPORT_SYMBOL(il_update_stats);
128
129/* create and remove of files */
130#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
131 if (!debugfs_create_file(#name, mode, parent, il, \
132 &il_dbgfs_##name##_ops)) \
133 goto err; \
134} while (0)
135
136#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
137 struct dentry *__tmp; \
138 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
139 parent, ptr); \
140 if (IS_ERR(__tmp) || !__tmp) \
141 goto err; \
142} while (0)
143
144#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
145 struct dentry *__tmp; \
146 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
147 parent, ptr); \
148 if (IS_ERR(__tmp) || !__tmp) \
149 goto err; \
150} while (0)
151
152/* file operation */
153#define DEBUGFS_READ_FUNC(name) \
154static ssize_t il_dbgfs_##name##_read(struct file *file, \
155 char __user *user_buf, \
156 size_t count, loff_t *ppos);
157
158#define DEBUGFS_WRITE_FUNC(name) \
159static ssize_t il_dbgfs_##name##_write(struct file *file, \
160 const char __user *user_buf, \
161 size_t count, loff_t *ppos);
162
163
164#define DEBUGFS_READ_FILE_OPS(name) \
165 DEBUGFS_READ_FUNC(name); \
166static const struct file_operations il_dbgfs_##name##_ops = { \
167 .read = il_dbgfs_##name##_read, \
168 .open = simple_open, \
169 .llseek = generic_file_llseek, \
170};
171
172#define DEBUGFS_WRITE_FILE_OPS(name) \
173 DEBUGFS_WRITE_FUNC(name); \
174static const struct file_operations il_dbgfs_##name##_ops = { \
175 .write = il_dbgfs_##name##_write, \
176 .open = simple_open, \
177 .llseek = generic_file_llseek, \
178};
179
180#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
181 DEBUGFS_READ_FUNC(name); \
182 DEBUGFS_WRITE_FUNC(name); \
183static const struct file_operations il_dbgfs_##name##_ops = { \
184 .write = il_dbgfs_##name##_write, \
185 .read = il_dbgfs_##name##_read, \
186 .open = simple_open, \
187 .llseek = generic_file_llseek, \
188};
189
190static const char *
191il_get_mgmt_string(int cmd)
192{
193 switch (cmd) {
194 IL_CMD(MANAGEMENT_ASSOC_REQ);
195 IL_CMD(MANAGEMENT_ASSOC_RESP);
196 IL_CMD(MANAGEMENT_REASSOC_REQ);
197 IL_CMD(MANAGEMENT_REASSOC_RESP);
198 IL_CMD(MANAGEMENT_PROBE_REQ);
199 IL_CMD(MANAGEMENT_PROBE_RESP);
200 IL_CMD(MANAGEMENT_BEACON);
201 IL_CMD(MANAGEMENT_ATIM);
202 IL_CMD(MANAGEMENT_DISASSOC);
203 IL_CMD(MANAGEMENT_AUTH);
204 IL_CMD(MANAGEMENT_DEAUTH);
205 IL_CMD(MANAGEMENT_ACTION);
206 default:
207 return "UNKNOWN";
208
209 }
210}
211
212static const char *
213il_get_ctrl_string(int cmd)
214{
215 switch (cmd) {
216 IL_CMD(CONTROL_BACK_REQ);
217 IL_CMD(CONTROL_BACK);
218 IL_CMD(CONTROL_PSPOLL);
219 IL_CMD(CONTROL_RTS);
220 IL_CMD(CONTROL_CTS);
221 IL_CMD(CONTROL_ACK);
222 IL_CMD(CONTROL_CFEND);
223 IL_CMD(CONTROL_CFENDACK);
224 default:
225 return "UNKNOWN";
226
227 }
228}
229
230static ssize_t
231il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
232 loff_t *ppos)
233{
234
235 struct il_priv *il = file->private_data;
236 char *buf;
237 int pos = 0;
238
239 int cnt;
240 ssize_t ret;
241 const size_t bufsz =
242 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
243 buf = kzalloc(bufsz, GFP_KERNEL);
244 if (!buf)
245 return -ENOMEM;
246 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
247 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
248 pos +=
249 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
250 il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
251 }
252 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
253 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
254 pos +=
255 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
256 il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
257 }
258 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
259 pos +=
260 scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
261 il->tx_stats.data_cnt);
262 pos +=
263 scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
264 il->tx_stats.data_bytes);
265 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
266 kfree(buf);
267 return ret;
268}
269
270static ssize_t
271il_dbgfs_clear_traffic_stats_write(struct file *file,
272 const char __user *user_buf, size_t count,
273 loff_t *ppos)
274{
275 struct il_priv *il = file->private_data;
276 u32 clear_flag;
277 char buf[8];
278 int buf_size;
279
280 memset(buf, 0, sizeof(buf));
281 buf_size = min(count, sizeof(buf) - 1);
282 if (copy_from_user(buf, user_buf, buf_size))
283 return -EFAULT;
284 if (sscanf(buf, "%x", &clear_flag) != 1)
285 return -EFAULT;
286 il_clear_traffic_stats(il);
287
288 return count;
289}
290
291static ssize_t
292il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
293 loff_t *ppos)
294{
295
296 struct il_priv *il = file->private_data;
297 char *buf;
298 int pos = 0;
299 int cnt;
300 ssize_t ret;
301 const size_t bufsz =
302 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
303 buf = kzalloc(bufsz, GFP_KERNEL);
304 if (!buf)
305 return -ENOMEM;
306
307 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
308 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
309 pos +=
310 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
311 il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
312 }
313 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
314 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
315 pos +=
316 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
317 il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
318 }
319 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
320 pos +=
321 scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
322 il->rx_stats.data_cnt);
323 pos +=
324 scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
325 il->rx_stats.data_bytes);
326
327 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
328 kfree(buf);
329 return ret;
330}
331
332#define BYTE1_MASK 0x000000ff
333#define BYTE2_MASK 0x0000ffff
334#define BYTE3_MASK 0x00ffffff
335static ssize_t
336il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
337 loff_t *ppos)
338{
339 u32 val;
340 char *buf;
341 ssize_t ret;
342 int i;
343 int pos = 0;
344 struct il_priv *il = file->private_data;
345 size_t bufsz;
346
347 /* default is to dump the entire data segment */
348 if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
349 il->dbgfs_sram_offset = 0x800000;
350 if (il->ucode_type == UCODE_INIT)
351 il->dbgfs_sram_len = il->ucode_init_data.len;
352 else
353 il->dbgfs_sram_len = il->ucode_data.len;
354 }
355 bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
356 buf = kmalloc(bufsz, GFP_KERNEL);
357 if (!buf)
358 return -ENOMEM;
359 pos +=
360 scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
361 il->dbgfs_sram_len);
362 pos +=
363 scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
364 il->dbgfs_sram_offset);
365 for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
366 val =
367 il_read_targ_mem(il,
368 il->dbgfs_sram_offset +
369 il->dbgfs_sram_len - i);
370 if (i < 4) {
371 switch (i) {
372 case 1:
373 val &= BYTE1_MASK;
374 break;
375 case 2:
376 val &= BYTE2_MASK;
377 break;
378 case 3:
379 val &= BYTE3_MASK;
380 break;
381 }
382 }
383 if (!(i % 16))
384 pos += scnprintf(buf + pos, bufsz - pos, "\n");
385 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
386 }
387 pos += scnprintf(buf + pos, bufsz - pos, "\n");
388
389 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
390 kfree(buf);
391 return ret;
392}
393
394static ssize_t
395il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
396 size_t count, loff_t *ppos)
397{
398 struct il_priv *il = file->private_data;
399 char buf[64];
400 int buf_size;
401 u32 offset, len;
402
403 memset(buf, 0, sizeof(buf));
404 buf_size = min(count, sizeof(buf) - 1);
405 if (copy_from_user(buf, user_buf, buf_size))
406 return -EFAULT;
407
408 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
409 il->dbgfs_sram_offset = offset;
410 il->dbgfs_sram_len = len;
411 } else {
412 il->dbgfs_sram_offset = 0;
413 il->dbgfs_sram_len = 0;
414 }
415
416 return count;
417}
418
419static ssize_t
420il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
421 loff_t *ppos)
422{
423 struct il_priv *il = file->private_data;
424 struct il_station_entry *station;
425 int max_sta = il->hw_params.max_stations;
426 char *buf;
427 int i, j, pos = 0;
428 ssize_t ret;
429 /* Add 30 for initial string */
430 const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);
431
432 buf = kmalloc(bufsz, GFP_KERNEL);
433 if (!buf)
434 return -ENOMEM;
435
436 pos +=
437 scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
438 il->num_stations);
439
440 for (i = 0; i < max_sta; i++) {
441 station = &il->stations[i];
442 if (!station->used)
443 continue;
444 pos +=
445 scnprintf(buf + pos, bufsz - pos,
446 "station %d - addr: %pM, flags: %#x\n", i,
447 station->sta.sta.addr,
448 station->sta.station_flags_msk);
449 pos +=
450 scnprintf(buf + pos, bufsz - pos,
451 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
452 pos +=
453 scnprintf(buf + pos, bufsz - pos,
454 "start_idx\tbitmap\t\t\trate_n_flags\n");
455
456 for (j = 0; j < MAX_TID_COUNT; j++) {
457 pos +=
458 scnprintf(buf + pos, bufsz - pos,
459 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
460 j, station->tid[j].seq_number,
461 station->tid[j].agg.txq_id,
462 station->tid[j].agg.frame_count,
463 station->tid[j].tfds_in_queue,
464 station->tid[j].agg.start_idx,
465 station->tid[j].agg.bitmap,
466 station->tid[j].agg.rate_n_flags);
467
468 if (station->tid[j].agg.wait_for_ba)
469 pos +=
470 scnprintf(buf + pos, bufsz - pos,
471 " - waitforba");
472 pos += scnprintf(buf + pos, bufsz - pos, "\n");
473 }
474
475 pos += scnprintf(buf + pos, bufsz - pos, "\n");
476 }
477
478 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
479 kfree(buf);
480 return ret;
481}
482
483static ssize_t
484il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
485 loff_t *ppos)
486{
487 ssize_t ret;
488 struct il_priv *il = file->private_data;
489 int pos = 0, ofs = 0, buf_size = 0;
490 const u8 *ptr;
491 char *buf;
492 u16 eeprom_ver;
493 size_t eeprom_len = il->cfg->eeprom_size;
494 buf_size = 4 * eeprom_len + 256;
495
496 if (eeprom_len % 16) {
497 IL_ERR("NVM size is not multiple of 16.\n");
498 return -ENODATA;
499 }
500
501 ptr = il->eeprom;
502 if (!ptr) {
503 IL_ERR("Invalid EEPROM memory\n");
504 return -ENOMEM;
505 }
506
507 /* 4 characters for byte 0xYY */
508 buf = kzalloc(buf_size, GFP_KERNEL);
509 if (!buf) {
510 IL_ERR("Can not allocate Buffer\n");
511 return -ENOMEM;
512 }
513 eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
514 pos +=
515	    scnprintf(buf + pos, buf_size - pos, "EEPROM version: 0x%x\n",
516		      eeprom_ver);
517 for (ofs = 0; ofs < eeprom_len; ofs += 16) {
518 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
519 ofs, ptr + ofs);
520 }
521
522 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
523 kfree(buf);
524 return ret;
525}
526
527static ssize_t
528il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
529 loff_t *ppos)
530{
531 struct il_priv *il = file->private_data;
532 struct ieee80211_channel *channels = NULL;
533 const struct ieee80211_supported_band *supp_band = NULL;
534 int pos = 0, i, bufsz = PAGE_SIZE;
535 char *buf;
536 ssize_t ret;
537
538 if (!test_bit(S_GEO_CONFIGURED, &il->status))
539 return -EAGAIN;
540
541 buf = kzalloc(bufsz, GFP_KERNEL);
542 if (!buf) {
543 IL_ERR("Can not allocate Buffer\n");
544 return -ENOMEM;
545 }
546
547 supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
548 if (supp_band) {
549 channels = supp_band->channels;
550
551 pos +=
552 scnprintf(buf + pos, bufsz - pos,
553			  "Displaying %d channels in 2.4GHz band (802.11bg):\n",
554 supp_band->n_channels);
555
556 for (i = 0; i < supp_band->n_channels; i++)
557 pos +=
558 scnprintf(buf + pos, bufsz - pos,
559 "%d: %ddBm: BSS%s%s, %s.\n",
560 channels[i].hw_value,
561 channels[i].max_power,
562 channels[i].
563 flags & IEEE80211_CHAN_RADAR ?
564 " (IEEE 802.11h required)" : "",
565 ((channels[i].
566 flags & IEEE80211_CHAN_NO_IR) ||
567 (channels[i].
568 flags & IEEE80211_CHAN_RADAR)) ? "" :
569 ", IBSS",
570 channels[i].
571 flags & IEEE80211_CHAN_NO_IR ?
572 "passive only" : "active/passive");
573 }
574 supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
575 if (supp_band) {
576 channels = supp_band->channels;
577
578 pos +=
579 scnprintf(buf + pos, bufsz - pos,
580			  "Displaying %d channels in 5.2GHz band (802.11a):\n",
581 supp_band->n_channels);
582
583 for (i = 0; i < supp_band->n_channels; i++)
584 pos +=
585 scnprintf(buf + pos, bufsz - pos,
586 "%d: %ddBm: BSS%s%s, %s.\n",
587 channels[i].hw_value,
588 channels[i].max_power,
589 channels[i].
590 flags & IEEE80211_CHAN_RADAR ?
591 " (IEEE 802.11h required)" : "",
592 ((channels[i].
593 flags & IEEE80211_CHAN_NO_IR) ||
594 (channels[i].
595 flags & IEEE80211_CHAN_RADAR)) ? "" :
596 ", IBSS",
597 channels[i].
598 flags & IEEE80211_CHAN_NO_IR ?
599 "passive only" : "active/passive");
600 }
601 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
602 kfree(buf);
603 return ret;
604}
605
606static ssize_t
607il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
608 loff_t *ppos)
609{
610
611 struct il_priv *il = file->private_data;
612 char buf[512];
613 int pos = 0;
614 const size_t bufsz = sizeof(buf);
615
616 pos +=
617 scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
618 test_bit(S_HCMD_ACTIVE, &il->status));
619 pos +=
620 scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
621 test_bit(S_INT_ENABLED, &il->status));
622 pos +=
623 scnprintf(buf + pos, bufsz - pos, "S_RFKILL:\t %d\n",
624 test_bit(S_RFKILL, &il->status));
625 pos +=
626 scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
627 test_bit(S_CT_KILL, &il->status));
628 pos +=
629 scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
630 test_bit(S_INIT, &il->status));
631 pos +=
632 scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
633 test_bit(S_ALIVE, &il->status));
634 pos +=
635 scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
636 test_bit(S_READY, &il->status));
637 pos +=
638 scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
639 test_bit(S_TEMPERATURE, &il->status));
640 pos +=
641 scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
642 test_bit(S_GEO_CONFIGURED, &il->status));
643 pos +=
644 scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
645 test_bit(S_EXIT_PENDING, &il->status));
646 pos +=
647 scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
648 test_bit(S_STATS, &il->status));
649 pos +=
650 scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
651 test_bit(S_SCANNING, &il->status));
652 pos +=
653 scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
654 test_bit(S_SCAN_ABORTING, &il->status));
655 pos +=
656 scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
657 test_bit(S_SCAN_HW, &il->status));
658 pos +=
659 scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
660 test_bit(S_POWER_PMI, &il->status));
661 pos +=
662 scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
663 test_bit(S_FW_ERROR, &il->status));
664 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
665}
666
667static ssize_t
668il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
669 loff_t *ppos)
670{
671
672 struct il_priv *il = file->private_data;
673 int pos = 0;
674 int cnt = 0;
675 char *buf;
676 int bufsz = 24 * 64; /* 24 items * 64 char per item */
677 ssize_t ret;
678
679 buf = kzalloc(bufsz, GFP_KERNEL);
680 if (!buf) {
681 IL_ERR("Can not allocate Buffer\n");
682 return -ENOMEM;
683 }
684
685 pos +=
686 scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");
687
688 pos +=
689 scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
690 il->isr_stats.hw);
691 pos +=
692 scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
693 il->isr_stats.sw);
694 if (il->isr_stats.sw || il->isr_stats.hw) {
695 pos +=
696 scnprintf(buf + pos, bufsz - pos,
697 "\tLast Restarting Code: 0x%X\n",
698 il->isr_stats.err_code);
699 }
700#ifdef CONFIG_IWLEGACY_DEBUG
701 pos +=
702 scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
703 il->isr_stats.sch);
704 pos +=
705 scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
706 il->isr_stats.alive);
707#endif
708 pos +=
709 scnprintf(buf + pos, bufsz - pos,
710 "HW RF KILL switch toggled:\t %u\n",
711 il->isr_stats.rfkill);
712
713 pos +=
714 scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
715 il->isr_stats.ctkill);
716
717 pos +=
718 scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
719 il->isr_stats.wakeup);
720
721 pos +=
722 scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
723 il->isr_stats.rx);
724 for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
725 if (il->isr_stats.handlers[cnt] > 0)
726 pos +=
727 scnprintf(buf + pos, bufsz - pos,
728 "\tRx handler[%36s]:\t\t %u\n",
729 il_get_cmd_string(cnt),
730 il->isr_stats.handlers[cnt]);
731 }
732
733 pos +=
734 scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
735 il->isr_stats.tx);
736
737 pos +=
738 scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
739 il->isr_stats.unhandled);
740
741 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
742 kfree(buf);
743 return ret;
744}
745
746static ssize_t
747il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
748 size_t count, loff_t *ppos)
749{
750 struct il_priv *il = file->private_data;
751 char buf[8];
752 int buf_size;
753 u32 reset_flag;
754
755 memset(buf, 0, sizeof(buf));
756 buf_size = min(count, sizeof(buf) - 1);
757 if (copy_from_user(buf, user_buf, buf_size))
758 return -EFAULT;
759 if (sscanf(buf, "%x", &reset_flag) != 1)
760 return -EFAULT;
761 if (reset_flag == 0)
762 il_clear_isr_stats(il);
763
764 return count;
765}
766
767static ssize_t
768il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
769 loff_t *ppos)
770{
771 struct il_priv *il = file->private_data;
772 int pos = 0, i;
773 char buf[256];
774 const size_t bufsz = sizeof(buf);
775
776 for (i = 0; i < AC_NUM; i++) {
777 pos +=
778 scnprintf(buf + pos, bufsz - pos,
779 "\tcw_min\tcw_max\taifsn\ttxop\n");
780 pos +=
781 scnprintf(buf + pos, bufsz - pos,
782 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
783 il->qos_data.def_qos_parm.ac[i].cw_min,
784 il->qos_data.def_qos_parm.ac[i].cw_max,
785 il->qos_data.def_qos_parm.ac[i].aifsn,
786 il->qos_data.def_qos_parm.ac[i].edca_txop);
787 }
788
789 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
790}
791
792static ssize_t
793il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
794 size_t count, loff_t *ppos)
795{
796 struct il_priv *il = file->private_data;
797 char buf[8];
798 int buf_size;
799 int ht40;
800
801 memset(buf, 0, sizeof(buf));
802 buf_size = min(count, sizeof(buf) - 1);
803 if (copy_from_user(buf, user_buf, buf_size))
804 return -EFAULT;
805 if (sscanf(buf, "%d", &ht40) != 1)
806 return -EFAULT;
807 if (!il_is_any_associated(il))
808 il->disable_ht40 = ht40 ? true : false;
809 else {
810 IL_ERR("Sta associated with AP - "
811 "Change to 40MHz channel support is not allowed\n");
812 return -EINVAL;
813 }
814
815 return count;
816}
817
818static ssize_t
819il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
820 size_t count, loff_t *ppos)
821{
822 struct il_priv *il = file->private_data;
823 char buf[100];
824 int pos = 0;
825 const size_t bufsz = sizeof(buf);
826
827 pos +=
828 scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
829 il->disable_ht40 ? "Disabled" : "Enabled");
830 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
831}
832
833DEBUGFS_READ_WRITE_FILE_OPS(sram);
834DEBUGFS_READ_FILE_OPS(nvm);
835DEBUGFS_READ_FILE_OPS(stations);
836DEBUGFS_READ_FILE_OPS(channels);
837DEBUGFS_READ_FILE_OPS(status);
838DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
839DEBUGFS_READ_FILE_OPS(qos);
840DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
841
842static ssize_t
843il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
844 loff_t *ppos)
845{
846
847 struct il_priv *il = file->private_data;
848 struct il_tx_queue *txq;
849 struct il_queue *q;
850 char *buf;
851 int pos = 0;
852 int cnt;
853 int ret;
854 const size_t bufsz =
855 sizeof(char) * 64 * il->cfg->num_of_queues;
856
857 if (!il->txq) {
858 IL_ERR("txq not ready\n");
859 return -EAGAIN;
860 }
861 buf = kzalloc(bufsz, GFP_KERNEL);
862 if (!buf)
863 return -ENOMEM;
864
865 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
866 txq = &il->txq[cnt];
867 q = &txq->q;
868 pos +=
869 scnprintf(buf + pos, bufsz - pos,
870 "hwq %.2d: read=%u write=%u stop=%d"
871 " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
872 q->read_ptr, q->write_ptr,
873 !!test_bit(cnt, il->queue_stopped),
874 txq->swq_id, txq->swq_id & 3,
875 (txq->swq_id >> 2) & 0x1f);
876 if (cnt >= 4)
877 continue;
878 /* for the ACs, display the stop count too */
879 pos +=
880 scnprintf(buf + pos, bufsz - pos,
881 " stop-count: %d\n",
882 atomic_read(&il->queue_stop_count[cnt]));
883 }
884 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
885 kfree(buf);
886 return ret;
887}
888
889static ssize_t
890il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
891 loff_t *ppos)
892{
893
894 struct il_priv *il = file->private_data;
895 struct il_rx_queue *rxq = &il->rxq;
896 char buf[256];
897 int pos = 0;
898 const size_t bufsz = sizeof(buf);
899
900 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
901 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
902 pos +=
903 scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
904 rxq->free_count);
905 if (rxq->rb_stts) {
906 pos +=
907 scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
908 le16_to_cpu(rxq->rb_stts->
909 closed_rb_num) & 0x0FFF);
910 } else {
911 pos +=
912 scnprintf(buf + pos, bufsz - pos,
913 "closed_rb_num: Not Allocated\n");
914 }
915 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
916}
917
918static ssize_t
919il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
920 size_t count, loff_t *ppos)
921{
922 struct il_priv *il = file->private_data;
923
924 return il->debugfs_ops->rx_stats_read(file, user_buf, count, ppos);
925}
926
927static ssize_t
928il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
929 size_t count, loff_t *ppos)
930{
931 struct il_priv *il = file->private_data;
932
933 return il->debugfs_ops->tx_stats_read(file, user_buf, count, ppos);
934}
935
936static ssize_t
937il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
938 size_t count, loff_t *ppos)
939{
940 struct il_priv *il = file->private_data;
941
942 return il->debugfs_ops->general_stats_read(file, user_buf, count, ppos);
943}
944
945static ssize_t
946il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
947 size_t count, loff_t *ppos)
948{
949
950 struct il_priv *il = file->private_data;
951 int pos = 0;
952 int cnt = 0;
953 char *buf;
954 int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
955 ssize_t ret;
956 struct il_sensitivity_data *data;
957
958 data = &il->sensitivity_data;
959 buf = kzalloc(bufsz, GFP_KERNEL);
960 if (!buf) {
961 IL_ERR("Can not allocate Buffer\n");
962 return -ENOMEM;
963 }
964
965 pos +=
966 scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
967 data->auto_corr_ofdm);
968 pos +=
969 scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
970 data->auto_corr_ofdm_mrc);
971 pos +=
972 scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
973 data->auto_corr_ofdm_x1);
974 pos +=
975 scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
976 data->auto_corr_ofdm_mrc_x1);
977 pos +=
978 scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
979 data->auto_corr_cck);
980 pos +=
981 scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
982 data->auto_corr_cck_mrc);
983 pos +=
984 scnprintf(buf + pos, bufsz - pos,
985 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
986 data->last_bad_plcp_cnt_ofdm);
987 pos +=
988 scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
989 data->last_fa_cnt_ofdm);
990 pos +=
991 scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
992 data->last_bad_plcp_cnt_cck);
993 pos +=
994 scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
995 data->last_fa_cnt_cck);
996 pos +=
997 scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
998 data->nrg_curr_state);
999 pos +=
1000 scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
1001 data->nrg_prev_state);
1002 pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
1003 for (cnt = 0; cnt < 10; cnt++) {
1004 pos +=
1005 scnprintf(buf + pos, bufsz - pos, " %u",
1006 data->nrg_value[cnt]);
1007 }
1008 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1009 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
1010 for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
1011 pos +=
1012 scnprintf(buf + pos, bufsz - pos, " %u",
1013 data->nrg_silence_rssi[cnt]);
1014 }
1015 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1016 pos +=
1017 scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
1018 data->nrg_silence_ref);
1019 pos +=
1020 scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
1021 data->nrg_energy_idx);
1022 pos +=
1023 scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
1024 data->nrg_silence_idx);
1025 pos +=
1026 scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
1027 data->nrg_th_cck);
1028 pos +=
1029 scnprintf(buf + pos, bufsz - pos,
1030 "nrg_auto_corr_silence_diff:\t %u\n",
1031 data->nrg_auto_corr_silence_diff);
1032 pos +=
1033 scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
1034 data->num_in_cck_no_fa);
1035 pos +=
1036 scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
1037 data->nrg_th_ofdm);
1038
1039 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1040 kfree(buf);
1041 return ret;
1042}
1043
1044static ssize_t
1045il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
1046 size_t count, loff_t *ppos)
1047{
1048
1049 struct il_priv *il = file->private_data;
1050 int pos = 0;
1051 int cnt = 0;
1052 char *buf;
1053 int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
1054 ssize_t ret;
1055 struct il_chain_noise_data *data;
1056
1057 data = &il->chain_noise_data;
1058 buf = kzalloc(bufsz, GFP_KERNEL);
1059 if (!buf) {
1060 IL_ERR("Can not allocate Buffer\n");
1061 return -ENOMEM;
1062 }
1063
1064 pos +=
1065 scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
1066 data->active_chains);
1067 pos +=
1068 scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
1069 data->chain_noise_a);
1070 pos +=
1071 scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
1072 data->chain_noise_b);
1073 pos +=
1074 scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
1075 data->chain_noise_c);
1076 pos +=
1077 scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
1078 data->chain_signal_a);
1079 pos +=
1080 scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
1081 data->chain_signal_b);
1082 pos +=
1083 scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
1084 data->chain_signal_c);
1085 pos +=
1086 scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
1087 data->beacon_count);
1088
1089 pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
1090 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
1091 pos +=
1092 scnprintf(buf + pos, bufsz - pos, " %u",
1093 data->disconn_array[cnt]);
1094 }
1095 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1096 pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
1097 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
1098 pos +=
1099 scnprintf(buf + pos, bufsz - pos, " %u",
1100 data->delta_gain_code[cnt]);
1101 }
1102 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1103 pos +=
1104 scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
1105 data->radio_write);
1106 pos +=
1107 scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
1108 data->state);
1109
1110 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1111 kfree(buf);
1112 return ret;
1113}
1114
1115static ssize_t
1116il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
1117 size_t count, loff_t *ppos)
1118{
1119 struct il_priv *il = file->private_data;
1120 char buf[60];
1121 int pos = 0;
1122 const size_t bufsz = sizeof(buf);
1123 u32 pwrsave_status;
1124
1125 pwrsave_status =
1126 _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1127
1128 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1129 pos +=
1130 scnprintf(buf + pos, bufsz - pos, "%s\n",
1131 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1132 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1133 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1134 "error");
1135
1136 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1137}
1138
1139static ssize_t
1140il_dbgfs_clear_ucode_stats_write(struct file *file,
1141 const char __user *user_buf, size_t count,
1142 loff_t *ppos)
1143{
1144 struct il_priv *il = file->private_data;
1145 char buf[8];
1146 int buf_size;
1147 int clear;
1148
1149 memset(buf, 0, sizeof(buf));
1150 buf_size = min(count, sizeof(buf) - 1);
1151 if (copy_from_user(buf, user_buf, buf_size))
1152 return -EFAULT;
1153 if (sscanf(buf, "%d", &clear) != 1)
1154 return -EFAULT;
1155
1156 /* make request to uCode to retrieve stats information */
1157 mutex_lock(&il->mutex);
1158 il_send_stats_request(il, CMD_SYNC, true);
1159 mutex_unlock(&il->mutex);
1160
1161 return count;
1162}
1163
1164static ssize_t
1165il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
1166 size_t count, loff_t *ppos)
1167{
1168
1169 struct il_priv *il = file->private_data;
1170 int len = 0;
1171 char buf[20];
1172
1173 len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.flags));
1174 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1175}
1176
1177static ssize_t
1178il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
1179 size_t count, loff_t *ppos)
1180{
1181
1182 struct il_priv *il = file->private_data;
1183 int len = 0;
1184 char buf[20];
1185
1186 len =
1187 sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags));
1188 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1189}
1190
1191static ssize_t
1192il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
1193 loff_t *ppos)
1194{
1195 struct il_priv *il = file->private_data;
1196 char *buf;
1197 int pos = 0;
1198 ssize_t ret = -EFAULT;
1199
1200 if (il->ops->dump_fh) {
1201 ret = pos = il->ops->dump_fh(il, &buf, true);
1202 if (buf) {
1203 ret =
1204 simple_read_from_buffer(user_buf, count, ppos, buf,
1205 pos);
1206 kfree(buf);
1207 }
1208 }
1209
1210 return ret;
1211}
1212
1213static ssize_t
1214il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
1215 size_t count, loff_t *ppos)
1216{
1217
1218 struct il_priv *il = file->private_data;
1219 int pos = 0;
1220 char buf[12];
1221 const size_t bufsz = sizeof(buf);
1222
1223 pos +=
1224 scnprintf(buf + pos, bufsz - pos, "%d\n",
1225 il->missed_beacon_threshold);
1226
1227 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1228}
1229
1230static ssize_t
1231il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
1232 size_t count, loff_t *ppos)
1233{
1234 struct il_priv *il = file->private_data;
1235 char buf[8];
1236 int buf_size;
1237 int missed;
1238
1239 memset(buf, 0, sizeof(buf));
1240 buf_size = min(count, sizeof(buf) - 1);
1241 if (copy_from_user(buf, user_buf, buf_size))
1242 return -EFAULT;
1243 if (sscanf(buf, "%d", &missed) != 1)
1244 return -EINVAL;
1245
1246 if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
1247 missed > IL_MISSED_BEACON_THRESHOLD_MAX)
1248 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
1249 else
1250 il->missed_beacon_threshold = missed;
1251
1252 return count;
1253}
1254
1255static ssize_t
1256il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
1257 size_t count, loff_t *ppos)
1258{
1259
1260 struct il_priv *il = file->private_data;
1261 int pos = 0;
1262 char buf[300];
1263 const size_t bufsz = sizeof(buf);
1264 struct il_force_reset *force_reset;
1265
1266 force_reset = &il->force_reset;
1267
1268 pos +=
1269	    scnprintf(buf + pos, bufsz - pos, "\tnumber of reset requests: %d\n",
1270 force_reset->reset_request_count);
1271 pos +=
1272 scnprintf(buf + pos, bufsz - pos,
1273		      "\tnumber of successful reset requests: %d\n",
1274 force_reset->reset_success_count);
1275 pos +=
1276 scnprintf(buf + pos, bufsz - pos,
1277		      "\tnumber of rejected reset requests: %d\n",
1278 force_reset->reset_reject_count);
1279 pos +=
1280 scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
1281 force_reset->reset_duration);
1282
1283 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1284}
1285
1286static ssize_t
1287il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
1288 size_t count, loff_t *ppos)
1289{
1290
1291 int ret;
1292 struct il_priv *il = file->private_data;
1293
1294 ret = il_force_reset(il, true);
1295
1296 return ret ? ret : count;
1297}
1298
1299static ssize_t
1300il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
1301 size_t count, loff_t *ppos)
1302{
1303
1304 struct il_priv *il = file->private_data;
1305 char buf[8];
1306 int buf_size;
1307 int timeout;
1308
1309 memset(buf, 0, sizeof(buf));
1310 buf_size = min(count, sizeof(buf) - 1);
1311 if (copy_from_user(buf, user_buf, buf_size))
1312 return -EFAULT;
1313 if (sscanf(buf, "%d", &timeout) != 1)
1314 return -EINVAL;
1315 if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
1316 timeout = IL_DEF_WD_TIMEOUT;
1317
1318 il->cfg->wd_timeout = timeout;
1319 il_setup_watchdog(il);
1320 return count;
1321}
1322
1323DEBUGFS_READ_FILE_OPS(rx_stats);
1324DEBUGFS_READ_FILE_OPS(tx_stats);
1325DEBUGFS_READ_FILE_OPS(rx_queue);
1326DEBUGFS_READ_FILE_OPS(tx_queue);
1327DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
1328DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
1329DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1330DEBUGFS_READ_FILE_OPS(sensitivity);
1331DEBUGFS_READ_FILE_OPS(chain_noise);
1332DEBUGFS_READ_FILE_OPS(power_save_status);
1333DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
1334DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
1335DEBUGFS_READ_FILE_OPS(fh_reg);
1336DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
1337DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1338DEBUGFS_READ_FILE_OPS(rxon_flags);
1339DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1340DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1341
1342/*
1343 * Create the debugfs files and directories
1344 *
1345 */
1346int
1347il_dbgfs_register(struct il_priv *il, const char *name)
1348{
1349 struct dentry *phyd = il->hw->wiphy->debugfsdir;
1350 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1351
1352 dir_drv = debugfs_create_dir(name, phyd);
1353 if (!dir_drv)
1354 return -ENOMEM;
1355
1356 il->debugfs_dir = dir_drv;
1357
1358 dir_data = debugfs_create_dir("data", dir_drv);
1359 if (!dir_data)
1360 goto err;
1361 dir_rf = debugfs_create_dir("rf", dir_drv);
1362 if (!dir_rf)
1363 goto err;
1364 dir_debug = debugfs_create_dir("debug", dir_drv);
1365 if (!dir_debug)
1366 goto err;
1367
1368 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1369 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1370 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1371 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1372 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1373 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1374 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1375 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1376 DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
1377 DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
1378 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1379 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1380 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1381 DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
1382 DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
1383 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1384 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1385 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1386 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1387 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1388 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1389
1390 if (il->cfg->sensitivity_calib_by_driver)
1391 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1392 if (il->cfg->chain_noise_calib_by_driver)
1393 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1394 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1395 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1396 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1397 if (il->cfg->sensitivity_calib_by_driver)
1398 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1399 &il->disable_sens_cal);
1400 if (il->cfg->chain_noise_calib_by_driver)
1401 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1402 &il->disable_chain_noise_cal);
1403 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
1404 return 0;
1405
1406err:
1407 IL_ERR("Can't create the debugfs directory\n");
1408 il_dbgfs_unregister(il);
1409 return -ENOMEM;
1410}
1411EXPORT_SYMBOL(il_dbgfs_register);
1412
1413/**
1414 * Remove the debugfs files and directories
1415 *
1416 */
1417void
1418il_dbgfs_unregister(struct il_priv *il)
1419{
1420 if (!il->debugfs_dir)
1421 return;
1422
1423 debugfs_remove_recursive(il->debugfs_dir);
1424 il->debugfs_dir = NULL;
1425}
1426EXPORT_SYMBOL(il_dbgfs_unregister);
diff --git a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
new file mode 100644
index 000000000000..85fe48e520f9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
@@ -0,0 +1,92 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __il_spectrum_h__
30#define __il_spectrum_h__
31enum { /* ieee80211_basic_report.map */
32 IEEE80211_BASIC_MAP_BSS = (1 << 0),
33 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
34 IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
35 IEEE80211_BASIC_MAP_RADAR = (1 << 3),
36 IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
37 /* Bits 5-7 are reserved */
38
39};
40struct ieee80211_basic_report {
41 u8 channel;
42 __le64 start_time;
43 __le16 duration;
44 u8 map;
45} __packed;
46
47enum { /* ieee80211_measurement_request.mode */
48 /* Bit 0 is reserved */
49 IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
50 IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
51 IEEE80211_MEASUREMENT_REPORT = (1 << 3),
52 /* Bits 4-7 are reserved */
53};
54
55enum {
56 IEEE80211_REPORT_BASIC = 0, /* required */
57 IEEE80211_REPORT_CCA = 1, /* optional */
58 IEEE80211_REPORT_RPI = 2, /* optional */
59 /* 3-255 reserved */
60};
61
62struct ieee80211_measurement_params {
63 u8 channel;
64 __le64 start_time;
65 __le16 duration;
66} __packed;
67
68struct ieee80211_info_element {
69 u8 id;
70 u8 len;
71 u8 data[0];
72} __packed;
73
74struct ieee80211_measurement_request {
75 struct ieee80211_info_element ie;
76 u8 token;
77 u8 mode;
78 u8 type;
79 struct ieee80211_measurement_params params[0];
80} __packed;
81
82struct ieee80211_measurement_report {
83 struct ieee80211_info_element ie;
84 u8 token;
85 u8 mode;
86 u8 type;
87 union {
88 struct ieee80211_basic_report basic[0];
89 } u;
90} __packed;
91
92#endif
diff --git a/drivers/net/wireless/intel/iwlegacy/prph.h b/drivers/net/wireless/intel/iwlegacy/prph.h
new file mode 100644
index 000000000000..ffec4b4a248a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/prph.h
@@ -0,0 +1,522 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __il_prph_h__
64#define __il_prph_h__
65
66/*
67 * Registers in this file are internal, not PCI bus memory mapped.
68 * Driver accesses these via HBUS_TARG_PRPH_* registers.
69 */
70#define PRPH_BASE (0x00000)
71#define PRPH_END (0xFFFFF)
72
73/* APMG (power management) constants */
74#define APMG_BASE (PRPH_BASE + 0x3000)
75#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000)
76#define APMG_CLK_EN_REG (APMG_BASE + 0x0004)
77#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008)
78#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c)
79#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010)
80#define APMG_RFKILL_REG (APMG_BASE + 0x0014)
81#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c)
82#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020)
83#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
84#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
85
86#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
87#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
88#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
89
90#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
98
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
100
101/**
102 * BSM (Bootstrap State Machine)
103 *
104 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
105 * in special SRAM that does not power down when the embedded control
106 * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
107 *
108 * When powering back up after sleeps (or during initial uCode load), the BSM
109 * internally loads the short bootstrap program from the special SRAM into the
110 * embedded processor's instruction SRAM, and starts the processor so it runs
111 * the bootstrap program.
112 *
113 * This bootstrap program loads (via PCI busmaster DMA) instructions and data
114 * images for a uCode program from host DRAM locations. The host driver
115 * indicates DRAM locations and sizes for instruction and data images via the
116 * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
117 * the new program starts automatically.
118 *
119 * The uCode used for open-source drivers includes two programs:
120 *
121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification
123 * (struct il_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver.
127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct il_alive_resp) that it
130 * is ready to be used.
131 *
 132 * When initializing the NIC, the host driver performs the following procedure:
133 *
134 * 1) Load bootstrap program (instructions only, no data image for bootstrap)
135 * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
136 *
137 * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
138 * images in host DRAM.
139 *
140 * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
141 * BSM_WR_MEM_SRC_REG = 0
142 * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
 143 * BSM_WR_DWCOUNT_REG = # dwords in bootstrap instruction image
144 *
145 * 4) Load bootstrap into instruction SRAM:
146 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
147 *
148 * 5) Wait for load completion:
149 * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
150 *
151 * 6) Enable future boot loads whenever NIC's power management triggers it:
152 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
153 *
154 * 7) Start the NIC by removing all reset bits:
155 * CSR_RESET = 0
156 *
157 * The bootstrap uCode (already in instruction SRAM) loads initialization
158 * uCode. Initialization uCode performs data initialization, sends
159 * "initialize alive" notification to host, and waits for a signal from
160 * host to load runtime code.
161 *
 162 * 8) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
163 * images in host DRAM. The last register loaded must be the instruction
164 * byte count register ("1" in MSbit tells initialization uCode to load
165 * the runtime uCode):
166 * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
167 *
 168 * 9) Wait for "alive" notification, then issue normal runtime commands.
169 *
170 * Data caching during power-downs:
171 *
 172 * Just before the embedded controller powers down (e.g. for automatic
173 * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
174 * a current snapshot of the embedded processor's data SRAM into host DRAM.
175 * This caches the data while the embedded processor's memory is powered down.
176 * Location and size are controlled by BSM_DRAM_DATA_* registers.
177 *
178 * NOTE: Instruction SRAM does not need to be saved, since that doesn't
179 * change during operation; the original image (from uCode distribution
180 * file) can be used for reload.
181 *
182 * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
183 * at the BSM_DRAM_* registers, which now point to the runtime instruction
184 * image and the cached (modified) runtime data (*not* the initialization
185 * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
186 * uCode from where it left off before the power-down.
187 *
188 * NOTE: Initialization uCode does *not* run as part of the save/restore
189 * procedure.
190 *
191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of C_POWER_TBL). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory).
195 *
196 * Note that, during normal operation, the host DRAM that held the initial
197 * startup data for the runtime code is now being used as a backup data cache
198 * for modified data! If you need to completely re-initialize the NIC, make
199 * sure that you use the runtime data image from the uCode distribution file,
200 * not the modified/saved runtime data. You may want to store a separate
201 * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
202 */
203
204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208
209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800)
212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
 216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* # of dwords */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218
219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
221 * NOTE: 3945 pointers use bits 31:0 of DRAM address.
222 * 4965 pointers use bits 35:4 of DRAM address.
223 */
224#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
225#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
226#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
227#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
228
229/*
230 * BSM special memory, stays powered on during power-save sleeps.
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */
235
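/*
 * A minimal sketch of steps 3)-6) of the bootstrap-load procedure documented
 * above, not the driver's actual load path.  It assumes the
 * il_rd_prph()/il_wr_prph() helpers declared in common.h, and takes the
 * destination (the device's RTC instruction SRAM lower bound) and the
 * bootstrap image length in dwords as parameters.
 */
static inline int
il_bsm_load_bootstrap_sketch(struct il_priv *il, u32 inst_dst, u32 len_dw)
{
	int i;

	/* source offset in BSM SRAM, destination in instruction SRAM, size */
	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
	il_wr_prph(il, BSM_WR_MEM_DST_REG, inst_dst);
	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len_dw);

	/* start the copy into instruction SRAM */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* poll until the START bit clears itself */
	for (i = 0; i < 100; i++) {
		if (!(il_rd_prph(il, BSM_WR_CTRL_REG) &
		      BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* re-arm BSM so power management can reload bootstrap later */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
	return 0;
}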
236/* 3945 Tx scheduler registers */
237#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
238#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000)
239#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004)
240#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010)
241#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014)
242#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020)
243#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
244#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
245
246/**
247 * Tx Scheduler
248 *
249 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
250 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
251 * host DRAM. It steers each frame's Tx command (which contains the frame
252 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
253 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
254 * but one DMA channel may take input from several queues.
255 *
256 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
257 * (cf. default_queue_to_tx_fifo in 4965.c):
258 *
259 * 0 -- EDCA BK (background) frames, lowest priority
260 * 1 -- EDCA BE (best effort) frames, normal priority
261 * 2 -- EDCA VI (video) frames, higher priority
262 * 3 -- EDCA VO (voice) and management frames, highest priority
263 * 4 -- Commands (e.g. RXON, etc.)
264 * 5 -- unused (HCCA)
265 * 6 -- unused (HCCA)
266 * 7 -- not used by driver (device-internal only)
267 *
268 *
269 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
270 * In addition, driver can map the remaining queues to Tx DMA/FIFO
271 * channels 0-3 to support 11n aggregation via EDCA DMA channels.
272 *
273 * The driver sets up each queue to work in one of two modes:
274 *
275 * 1) Scheduler-Ack, in which the scheduler automatically supports a
276 * block-ack (BA) win of up to 64 TFDs. In this mode, each queue
277 * contains TFDs for a unique combination of Recipient Address (RA)
278 * and Traffic Identifier (TID), that is, traffic of a given
279 * Quality-Of-Service (QOS) priority, destined for a single station.
280 *
281 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
282 * each frame within the BA win, including whether it's been transmitted,
283 * and whether it's been acknowledged by the receiving station. The device
284 * automatically processes block-acks received from the receiving STA,
285 * and reschedules un-acked frames to be retransmitted (successful
286 * Tx completion may end up being out-of-order).
287 *
288 * The driver must maintain the queue's Byte Count table in host DRAM
289 * (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
290 * This mode does not support fragmentation.
291 *
292 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
293 * The device may automatically retry Tx, but will retry only one frame
294 * at a time, until receiving ACK from receiving station, or reaching
295 * retry limit and giving up.
296 *
297 * The command queue (#4/#9) must use this mode!
298 * This mode does not require use of the Byte Count table in host DRAM.
299 *
300 * Driver controls scheduler operation via 3 means:
301 * 1) Scheduler registers
 302 * 2) Shared scheduler database in internal 4965 SRAM
303 * 3) Shared data in host DRAM
304 *
305 * Initialization:
306 *
307 * When loading, driver should allocate memory for:
308 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
309 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
310 * (1024 bytes for each queue).
311 *
312 * After receiving "Alive" response from uCode, driver must initialize
313 * the scheduler (especially for queue #4/#9, the command queue, otherwise
314 * the driver can't issue commands!):
315 */
316
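/*
 * The FIFO assignment above, written as a lookup table.  This is only a
 * sketch: the driver's authoritative table is default_queue_to_tx_fifo in
 * 4965.c, and the queue order shown here assumes mac80211's VO-first
 * queue numbering.
 */
static const u8 il_queue_to_fifo_sketch[] = {
	3,	/* queue 0: voice + management -> FIFO 3 (highest) */
	2,	/* queue 1: video              -> FIFO 2           */
	1,	/* queue 2: best effort        -> FIFO 1           */
	0,	/* queue 3: background         -> FIFO 0 (lowest)  */
	4,	/* queue 4: commands           -> FIFO 4           */
};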
317/**
318 * Max Tx win size is the max number of contiguous TFDs that the scheduler
319 * can keep track of at one time when creating block-ack chains of frames.
320 * Note that "64" matches the number of ack bits in a block-ack packet.
321 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
322 * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
323 */
324#define SCD_WIN_SIZE 64
325#define SCD_FRAME_LIMIT 64
326
327/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
328#define IL49_SCD_START_OFFSET 0xa02c00
329
330/*
331 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
332 * Value is valid only after "Alive" response from uCode.
333 */
334#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0)
335
336/*
337 * Driver may need to update queue-empty bits after changing queue's
338 * write and read pointers (idxes) during (re-)initialization (i.e. when
339 * scheduler is not tracking what's happening).
340 * Bit fields:
341 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
342 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
343 * NOTE: This register is not used by Linux driver.
344 */
345#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4)
346
347/*
348 * Physical base address of array of byte count (BC) circular buffers (CBs).
349 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
350 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
351 * Others are spaced by 1024 bytes.
 352 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
353 * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
354 * Bit fields:
355 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
356 */
357#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10)
358
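/*
 * Sketch of pointing the scheduler at the byte-count CBs described above;
 * only address bits 35:10 are programmed, hence the shift.  Assumes the
 * il_wr_prph() helper from common.h and a hypothetical 1024-byte-aligned
 * DMA address for the tables.
 */
static inline void
il_scd_set_bc_tbls_base_sketch(struct il_priv *il, dma_addr_t bc_tbls_dma)
{
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, (u32)(bc_tbls_dma >> 10));
}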
359/*
360 * Enables any/all Tx DMA/FIFO channels.
361 * Scheduler generates requests for only the active channels.
362 * Set this to 0xff to enable all 8 channels (normal usage).
363 * Bit fields:
364 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
365 */
366#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c)
367/*
368 * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
369 * Initialized and updated by driver as new TFDs are added to queue.
370 * NOTE: If using Block Ack, idx must correspond to frame's
371 * Start Sequence Number; idx = (SSN & 0xff)
372 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
373 */
374#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
375
376/*
377 * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
378 * For FIFO mode, idx indicates next frame to transmit.
379 * For Scheduler-ACK mode, idx indicates first frame in Tx win.
380 * Initialized by driver, updated by scheduler.
381 */
382#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
383
384/*
385 * Select which queues work in chain mode (1) vs. not (0).
386 * Use chain mode to build chains of aggregated frames.
387 * Bit fields:
388 * 31-16: Reserved
389 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
 390 * NOTE: If driver sets up a queue for chain mode, it should also set up
 391 * Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
392 */
393#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0)
394
395/*
396 * Select which queues interrupt driver when scheduler increments
397 * a queue's read pointer (idx).
398 * Bit fields:
399 * 31-16: Reserved
400 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
401 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
402 * from Rx queue to read Tx command responses and update Tx queues.
403 */
404#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4)
405
406/*
407 * Queue search status registers. One for each queue.
408 * Sets up queue mode and assigns queue to Tx DMA channel.
409 * Bit fields:
410 * 19-10: Write mask/enable bits for bits 0-9
411 * 9: Driver should init to "0"
412 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
413 * Driver should init to "1" for aggregation mode, or "0" otherwise.
414 * 7-6: Driver should init to "0"
415 * 5: Window Size Left; indicates whether scheduler can request
416 * another TFD, based on win size, etc. Driver should init
417 * this bit to "1" for aggregation mode, or "0" for non-agg.
418 * 4-1: Tx FIFO to use (range 0-7).
419 * 0: Queue is active (1), not active (0).
420 * Other bits should be written as "0"
421 *
422 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
423 * via SCD_QUEUECHAIN_SEL.
424 */
425#define IL49_SCD_QUEUE_STATUS_BITS(x)\
426 (IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
427
428/* Bit field positions */
429#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
430#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
431#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
432#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
433
434/* Write masks */
435#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
436#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
437
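/*
 * Sketch of activating one queue in FIFO (non-aggregation) mode using the
 * bit positions above.  The write-mask bits (19-10) gate which of the low
 * bits actually change; setting IL49_SCD_QUEUE_STTS_REG_MSK enables writes
 * to all of them.  Assumes the il_wr_prph() helper from common.h.
 */
static inline void
il_scd_activate_queue_sketch(struct il_priv *il, int txq_id, int tx_fifo)
{
	u32 val = (1 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		  (tx_fifo << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		  IL49_SCD_QUEUE_STTS_REG_MSK;

	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id), val);
}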
438/**
439 * 4965 internal SRAM structures for scheduler, shared with driver ...
440 *
441 * Driver should clear and initialize the following areas after receiving
442 * "Alive" response from 4965 uCode, i.e. after initial
443 * uCode load, or after a uCode load done for error recovery:
444 *
445 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
446 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
447 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
448 *
449 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
450 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
451 * All OFFSET values must be added to this base address.
452 */
453
454/*
455 * Queue context. One 8-byte entry for each of 16 queues.
456 *
457 * Driver should clear this entire area (size 0x80) to 0 after receiving
458 * "Alive" notification from uCode. Additionally, driver should init
459 * each queue's entry as follows:
460 *
461 * LS Dword bit fields:
462 * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64.
463 *
464 * MS Dword bit fields:
465 * 16-22: Frame limit. Driver should init to 10 (0xa).
466 *
467 * Driver should init all other bits to 0.
468 *
469 * Init must be done after driver receives "Alive" response from 4965 uCode,
470 * and when setting up queue for aggregation.
471 */
472#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380
473#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
474 (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
475
476#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
477#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
478#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
479#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
480
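/*
 * Sketch of initializing one queue's context entry per the layout above:
 * win size in the LS dword, frame limit in the MS dword, everything else
 * zero.  Assumes the il_write_targ_mem() SRAM accessor from common.h and a
 * scd_base_addr previously read from IL49_SCD_SRAM_BASE_ADDR.
 */
static inline void
il_scd_init_queue_ctx_sketch(struct il_priv *il, u32 scd_base_addr,
			     int txq_id)
{
	u32 addr = scd_base_addr + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);

	il_write_targ_mem(il, addr,
			  (SCD_WIN_SIZE <<
			   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
			  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	il_write_targ_mem(il, addr + sizeof(u32),
			  (SCD_FRAME_LIMIT <<
			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}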
481/*
482 * Tx Status Bitmap
483 *
484 * Driver should clear this entire area (size 0x100) to 0 after receiving
485 * "Alive" notification from uCode. Area is used only by device itself;
486 * no other support (besides clearing) is required from driver.
487 */
488#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
489
490/*
491 * RAxTID to queue translation mapping.
492 *
 493 * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
494 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
495 * one QOS priority level destined for one station (for this wireless link,
496 * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit
497 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
498 * mode, the device ignores the mapping value.
499 *
500 * Bit fields, for each 16-bit map:
501 * 15-9: Reserved, set to 0
502 * 8-4: Index into device's station table for recipient station
503 * 3-0: Traffic ID (tid), range 0-15
504 *
505 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
506 * "Alive" notification from uCode. To update a 16-bit map value, driver
507 * must read a dword-aligned value from device SRAM, replace the 16-bit map
508 * value of interest, and write the dword value back into device SRAM.
509 */
510#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500
511
512/* Find translation table dword to read/write for given queue */
513#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
514 ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
515
516#define IL_SCD_TXFIFO_POS_TID (0)
517#define IL_SCD_TXFIFO_POS_RA (4)
518#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
519
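/*
 * Sketch of the dword-aligned read-modify-write described above for one
 * queue's RAxTID map.  Assumes the il_read_targ_mem()/il_write_targ_mem()
 * SRAM accessors from common.h and a scd_base_addr previously read from
 * IL49_SCD_SRAM_BASE_ADDR.
 */
static inline void
il_scd_set_ra_tid_map_sketch(struct il_priv *il, u32 scd_base_addr,
			     int txq_id, u8 sta_id, u8 tid)
{
	u32 addr = scd_base_addr +
		   IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	u16 map = ((sta_id << IL_SCD_TXFIFO_POS_RA) |
		   (tid << IL_SCD_TXFIFO_POS_TID)) &
		  IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
	u32 val = il_read_targ_mem(il, addr);

	/* each dword holds two 16-bit maps; odd queues use the high half */
	if (txq_id & 0x1)
		val = (map << 16) | (val & 0x0000FFFF);
	else
		val = map | (val & 0xFFFF0000);

	il_write_targ_mem(il, addr, val);
}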
520/*********************** END TX SCHEDULER *************************************/
521
522#endif /* __il_prph_h__ */