author     Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-22 10:38:37 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-22 10:38:37 -0500
commit     fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree       a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/wireless/iwlegacy
parent     8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files. (HEAD, master)
Diffstat (limited to 'drivers/net/wireless/iwlegacy')
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c | 523
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h | 60
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-fh.h | 187
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-hw.h | 291
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-led.c | 64
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-led.h | 32
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-rs.c | 997
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945.c | 2742
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945.h | 308
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-calib.c | 967
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-calib.h | 75
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c | 774
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h | 59
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c | 154
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-hw.h | 811
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-led.c | 74
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-led.h | 33
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-lib.c | 1194
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-rs.c | 2872
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-rx.c | 215
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-sta.c | 721
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-tx.c | 1378
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-ucode.c | 166
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965.c | 2184
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965.h | 282
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-commands.h | 3398
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-core.c | 2659
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-core.h | 634
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-csr.h | 422
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-debug.h | 198
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-debugfs.c | 1313
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-dev.h | 1364
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-devtrace.c | 42
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-devtrace.h | 210
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-eeprom.c | 553
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-eeprom.h | 344
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-fh.h | 513
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-hcmd.c | 271
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-helpers.h | 196
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-io.h | 545
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-led.c | 206
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-led.h | 56
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-legacy-rs.h | 456
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-power.c | 165
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-power.h | 55
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-prph.h | 523
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-rx.c | 281
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-scan.c | 549
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-sta.c | 816
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-sta.h | 148
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-tx.c | 658
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl3945-base.c | 4017
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl4965-base.c | 3282
53 files changed, 41037 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
new file mode 100644
index 00000000000..cfabb38793a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
@@ -0,0 +1,523 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-3945-debugfs.h"
30
31
32static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
33{
34 int p = 0;
35
36 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
37 le32_to_cpu(priv->_3945.statistics.flag));
38 if (le32_to_cpu(priv->_3945.statistics.flag) &
39 UCODE_STATISTICS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(priv->_3945.statistics.flag) &
44 UCODE_STATISTICS_FREQUENCY_MSK)
45 ? "2.4 GHz" : "5.2 GHz");
46 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
47 (le32_to_cpu(priv->_3945.statistics.flag) &
48 UCODE_STATISTICS_NARROW_BAND_MSK)
49 ? "enabled" : "disabled");
50 return p;
51}
52
53ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
54 char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 struct iwl_priv *priv = file->private_data;
58 int pos = 0;
59 char *buf;
60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
62 ssize_t ret;
63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
64 *max_ofdm;
65 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
66 struct iwl39_statistics_rx_non_phy *general, *accum_general;
67 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
68
69 if (!iwl_legacy_is_alive(priv))
70 return -EAGAIN;
71
72 buf = kzalloc(bufsz, GFP_KERNEL);
73 if (!buf) {
74 IWL_ERR(priv, "Can not allocate Buffer\n");
75 return -ENOMEM;
76 }
77
78 /*
79 * The statistic information display here is based on
80 * the last statistics notification from uCode
81 * might not reflect the current uCode activity
82 */
83 ofdm = &priv->_3945.statistics.rx.ofdm;
84 cck = &priv->_3945.statistics.rx.cck;
85 general = &priv->_3945.statistics.rx.general;
86 accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
87 accum_cck = &priv->_3945.accum_statistics.rx.cck;
88 accum_general = &priv->_3945.accum_statistics.rx.general;
89 delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
90 delta_cck = &priv->_3945.delta_statistics.rx.cck;
91 delta_general = &priv->_3945.delta_statistics.rx.general;
92 max_ofdm = &priv->_3945.max_delta.rx.ofdm;
93 max_cck = &priv->_3945.max_delta.rx.cck;
94 max_general = &priv->_3945.max_delta.rx.general;
95
96 pos += iwl3945_statistics_flag(priv, buf, bufsz);
97 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
98 "acumulative delta max\n",
99 "Statistics_Rx - OFDM:");
100 pos += scnprintf(buf + pos, bufsz - pos,
101 " %-30s %10u %10u %10u %10u\n",
102 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
103 accum_ofdm->ina_cnt,
104 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 " %-30s %10u %10u %10u %10u\n",
107 "fina_cnt:",
108 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
109 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
110 pos += scnprintf(buf + pos, bufsz - pos,
111 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
112 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
113 delta_ofdm->plcp_err, max_ofdm->plcp_err);
114 pos += scnprintf(buf + pos, bufsz - pos,
115 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
116 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
117 delta_ofdm->crc32_err, max_ofdm->crc32_err);
118 pos += scnprintf(buf + pos, bufsz - pos,
119 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
120 le32_to_cpu(ofdm->overrun_err),
121 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
122 max_ofdm->overrun_err);
123 pos += scnprintf(buf + pos, bufsz - pos,
124 " %-30s %10u %10u %10u %10u\n",
125 "early_overrun_err:",
126 le32_to_cpu(ofdm->early_overrun_err),
127 accum_ofdm->early_overrun_err,
128 delta_ofdm->early_overrun_err,
129 max_ofdm->early_overrun_err);
130 pos += scnprintf(buf + pos, bufsz - pos,
131 " %-30s %10u %10u %10u %10u\n",
132 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
133 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
134 max_ofdm->crc32_good);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
137 le32_to_cpu(ofdm->false_alarm_cnt),
138 accum_ofdm->false_alarm_cnt,
139 delta_ofdm->false_alarm_cnt,
140 max_ofdm->false_alarm_cnt);
141 pos += scnprintf(buf + pos, bufsz - pos,
142 " %-30s %10u %10u %10u %10u\n",
143 "fina_sync_err_cnt:",
144 le32_to_cpu(ofdm->fina_sync_err_cnt),
145 accum_ofdm->fina_sync_err_cnt,
146 delta_ofdm->fina_sync_err_cnt,
147 max_ofdm->fina_sync_err_cnt);
148 pos += scnprintf(buf + pos, bufsz - pos,
149 " %-30s %10u %10u %10u %10u\n",
150 "sfd_timeout:",
151 le32_to_cpu(ofdm->sfd_timeout),
152 accum_ofdm->sfd_timeout,
153 delta_ofdm->sfd_timeout,
154 max_ofdm->sfd_timeout);
155 pos += scnprintf(buf + pos, bufsz - pos,
156 " %-30s %10u %10u %10u %10u\n",
157 "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout),
159 accum_ofdm->fina_timeout,
160 delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 " %-30s %10u %10u %10u %10u\n",
164 "unresponded_rts:",
165 le32_to_cpu(ofdm->unresponded_rts),
166 accum_ofdm->unresponded_rts,
167 delta_ofdm->unresponded_rts,
168 max_ofdm->unresponded_rts);
169 pos += scnprintf(buf + pos, bufsz - pos,
170 " %-30s %10u %10u %10u %10u\n",
171 "rxe_frame_lmt_ovrun:",
172 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
173 accum_ofdm->rxe_frame_limit_overrun,
174 delta_ofdm->rxe_frame_limit_overrun,
175 max_ofdm->rxe_frame_limit_overrun);
176 pos += scnprintf(buf + pos, bufsz - pos,
177 " %-30s %10u %10u %10u %10u\n",
178 "sent_ack_cnt:",
179 le32_to_cpu(ofdm->sent_ack_cnt),
180 accum_ofdm->sent_ack_cnt,
181 delta_ofdm->sent_ack_cnt,
182 max_ofdm->sent_ack_cnt);
183 pos += scnprintf(buf + pos, bufsz - pos,
184 " %-30s %10u %10u %10u %10u\n",
185 "sent_cts_cnt:",
186 le32_to_cpu(ofdm->sent_cts_cnt),
187 accum_ofdm->sent_cts_cnt,
188 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
189
190 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
191 "acumulative delta max\n",
192 "Statistics_Rx - CCK:");
193 pos += scnprintf(buf + pos, bufsz - pos,
194 " %-30s %10u %10u %10u %10u\n",
195 "ina_cnt:",
196 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
197 delta_cck->ina_cnt, max_cck->ina_cnt);
198 pos += scnprintf(buf + pos, bufsz - pos,
199 " %-30s %10u %10u %10u %10u\n",
200 "fina_cnt:",
201 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
202 delta_cck->fina_cnt, max_cck->fina_cnt);
203 pos += scnprintf(buf + pos, bufsz - pos,
204 " %-30s %10u %10u %10u %10u\n",
205 "plcp_err:",
206 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
207 delta_cck->plcp_err, max_cck->plcp_err);
208 pos += scnprintf(buf + pos, bufsz - pos,
209 " %-30s %10u %10u %10u %10u\n",
210 "crc32_err:",
211 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
212 delta_cck->crc32_err, max_cck->crc32_err);
213 pos += scnprintf(buf + pos, bufsz - pos,
214 " %-30s %10u %10u %10u %10u\n",
215 "overrun_err:",
216 le32_to_cpu(cck->overrun_err),
217 accum_cck->overrun_err,
218 delta_cck->overrun_err, max_cck->overrun_err);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 " %-30s %10u %10u %10u %10u\n",
221 "early_overrun_err:",
222 le32_to_cpu(cck->early_overrun_err),
223 accum_cck->early_overrun_err,
224 delta_cck->early_overrun_err,
225 max_cck->early_overrun_err);
226 pos += scnprintf(buf + pos, bufsz - pos,
227 " %-30s %10u %10u %10u %10u\n",
228 "crc32_good:",
229 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
230 delta_cck->crc32_good,
231 max_cck->crc32_good);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 " %-30s %10u %10u %10u %10u\n",
234 "false_alarm_cnt:",
235 le32_to_cpu(cck->false_alarm_cnt),
236 accum_cck->false_alarm_cnt,
237 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 " %-30s %10u %10u %10u %10u\n",
240 "fina_sync_err_cnt:",
241 le32_to_cpu(cck->fina_sync_err_cnt),
242 accum_cck->fina_sync_err_cnt,
243 delta_cck->fina_sync_err_cnt,
244 max_cck->fina_sync_err_cnt);
245 pos += scnprintf(buf + pos, bufsz - pos,
246 " %-30s %10u %10u %10u %10u\n",
247 "sfd_timeout:",
248 le32_to_cpu(cck->sfd_timeout),
249 accum_cck->sfd_timeout,
250 delta_cck->sfd_timeout, max_cck->sfd_timeout);
251 pos += scnprintf(buf + pos, bufsz - pos,
252 " %-30s %10u %10u %10u %10u\n",
253 "fina_timeout:",
254 le32_to_cpu(cck->fina_timeout),
255 accum_cck->fina_timeout,
256 delta_cck->fina_timeout, max_cck->fina_timeout);
257 pos += scnprintf(buf + pos, bufsz - pos,
258 " %-30s %10u %10u %10u %10u\n",
259 "unresponded_rts:",
260 le32_to_cpu(cck->unresponded_rts),
261 accum_cck->unresponded_rts,
262 delta_cck->unresponded_rts,
263 max_cck->unresponded_rts);
264 pos += scnprintf(buf + pos, bufsz - pos,
265 " %-30s %10u %10u %10u %10u\n",
266 "rxe_frame_lmt_ovrun:",
267 le32_to_cpu(cck->rxe_frame_limit_overrun),
268 accum_cck->rxe_frame_limit_overrun,
269 delta_cck->rxe_frame_limit_overrun,
270 max_cck->rxe_frame_limit_overrun);
271 pos += scnprintf(buf + pos, bufsz - pos,
272 " %-30s %10u %10u %10u %10u\n",
273 "sent_ack_cnt:",
274 le32_to_cpu(cck->sent_ack_cnt),
275 accum_cck->sent_ack_cnt,
276 delta_cck->sent_ack_cnt,
277 max_cck->sent_ack_cnt);
278 pos += scnprintf(buf + pos, bufsz - pos,
279 " %-30s %10u %10u %10u %10u\n",
280 "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt,
283 delta_cck->sent_cts_cnt,
284 max_cck->sent_cts_cnt);
285
286 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
287 "acumulative delta max\n",
288 "Statistics_Rx - GENERAL:");
289 pos += scnprintf(buf + pos, bufsz - pos,
290 " %-30s %10u %10u %10u %10u\n",
291 "bogus_cts:",
292 le32_to_cpu(general->bogus_cts),
293 accum_general->bogus_cts,
294 delta_general->bogus_cts, max_general->bogus_cts);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 " %-30s %10u %10u %10u %10u\n",
297 "bogus_ack:",
298 le32_to_cpu(general->bogus_ack),
299 accum_general->bogus_ack,
300 delta_general->bogus_ack, max_general->bogus_ack);
301 pos += scnprintf(buf + pos, bufsz - pos,
302 " %-30s %10u %10u %10u %10u\n",
303 "non_bssid_frames:",
304 le32_to_cpu(general->non_bssid_frames),
305 accum_general->non_bssid_frames,
306 delta_general->non_bssid_frames,
307 max_general->non_bssid_frames);
308 pos += scnprintf(buf + pos, bufsz - pos,
309 " %-30s %10u %10u %10u %10u\n",
310 "filtered_frames:",
311 le32_to_cpu(general->filtered_frames),
312 accum_general->filtered_frames,
313 delta_general->filtered_frames,
314 max_general->filtered_frames);
315 pos += scnprintf(buf + pos, bufsz - pos,
316 " %-30s %10u %10u %10u %10u\n",
317 "non_channel_beacons:",
318 le32_to_cpu(general->non_channel_beacons),
319 accum_general->non_channel_beacons,
320 delta_general->non_channel_beacons,
321 max_general->non_channel_beacons);
322
323 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
324 kfree(buf);
325 return ret;
326}
327
328ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
329 char __user *user_buf,
330 size_t count, loff_t *ppos)
331{
332 struct iwl_priv *priv = file->private_data;
333 int pos = 0;
334 char *buf;
335 int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
336 ssize_t ret;
337 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
338
339 if (!iwl_legacy_is_alive(priv))
340 return -EAGAIN;
341
342 buf = kzalloc(bufsz, GFP_KERNEL);
343 if (!buf) {
344 IWL_ERR(priv, "Can not allocate Buffer\n");
345 return -ENOMEM;
346 }
347
348 /*
349 * The statistic information display here is based on
350 * the last statistics notification from uCode
351 * might not reflect the current uCode activity
352 */
353 tx = &priv->_3945.statistics.tx;
354 accum_tx = &priv->_3945.accum_statistics.tx;
355 delta_tx = &priv->_3945.delta_statistics.tx;
356 max_tx = &priv->_3945.max_delta.tx;
357 pos += iwl3945_statistics_flag(priv, buf, bufsz);
358 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
359 "acumulative delta max\n",
360 "Statistics_Tx:");
361 pos += scnprintf(buf + pos, bufsz - pos,
362 " %-30s %10u %10u %10u %10u\n",
363 "preamble:",
364 le32_to_cpu(tx->preamble_cnt),
365 accum_tx->preamble_cnt,
366 delta_tx->preamble_cnt, max_tx->preamble_cnt);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 " %-30s %10u %10u %10u %10u\n",
369 "rx_detected_cnt:",
370 le32_to_cpu(tx->rx_detected_cnt),
371 accum_tx->rx_detected_cnt,
372 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 " %-30s %10u %10u %10u %10u\n",
375 "bt_prio_defer_cnt:",
376 le32_to_cpu(tx->bt_prio_defer_cnt),
377 accum_tx->bt_prio_defer_cnt,
378 delta_tx->bt_prio_defer_cnt,
379 max_tx->bt_prio_defer_cnt);
380 pos += scnprintf(buf + pos, bufsz - pos,
381 " %-30s %10u %10u %10u %10u\n",
382 "bt_prio_kill_cnt:",
383 le32_to_cpu(tx->bt_prio_kill_cnt),
384 accum_tx->bt_prio_kill_cnt,
385 delta_tx->bt_prio_kill_cnt,
386 max_tx->bt_prio_kill_cnt);
387 pos += scnprintf(buf + pos, bufsz - pos,
388 " %-30s %10u %10u %10u %10u\n",
389 "few_bytes_cnt:",
390 le32_to_cpu(tx->few_bytes_cnt),
391 accum_tx->few_bytes_cnt,
392 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
393 pos += scnprintf(buf + pos, bufsz - pos,
394 " %-30s %10u %10u %10u %10u\n",
395 "cts_timeout:",
396 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
397 delta_tx->cts_timeout, max_tx->cts_timeout);
398 pos += scnprintf(buf + pos, bufsz - pos,
399 " %-30s %10u %10u %10u %10u\n",
400 "ack_timeout:",
401 le32_to_cpu(tx->ack_timeout),
402 accum_tx->ack_timeout,
403 delta_tx->ack_timeout, max_tx->ack_timeout);
404 pos += scnprintf(buf + pos, bufsz - pos,
405 " %-30s %10u %10u %10u %10u\n",
406 "expected_ack_cnt:",
407 le32_to_cpu(tx->expected_ack_cnt),
408 accum_tx->expected_ack_cnt,
409 delta_tx->expected_ack_cnt,
410 max_tx->expected_ack_cnt);
411 pos += scnprintf(buf + pos, bufsz - pos,
412 " %-30s %10u %10u %10u %10u\n",
413 "actual_ack_cnt:",
414 le32_to_cpu(tx->actual_ack_cnt),
415 accum_tx->actual_ack_cnt,
416 delta_tx->actual_ack_cnt,
417 max_tx->actual_ack_cnt);
418
419 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
420 kfree(buf);
421 return ret;
422}
423
424ssize_t iwl3945_ucode_general_stats_read(struct file *file,
425 char __user *user_buf,
426 size_t count, loff_t *ppos)
427{
428 struct iwl_priv *priv = file->private_data;
429 int pos = 0;
430 char *buf;
431 int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
432 ssize_t ret;
433 struct iwl39_statistics_general *general, *accum_general;
434 struct iwl39_statistics_general *delta_general, *max_general;
435 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
436 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
437
438 if (!iwl_legacy_is_alive(priv))
439 return -EAGAIN;
440
441 buf = kzalloc(bufsz, GFP_KERNEL);
442 if (!buf) {
443 IWL_ERR(priv, "Can not allocate Buffer\n");
444 return -ENOMEM;
445 }
446
447 /*
448 * The statistic information display here is based on
449 * the last statistics notification from uCode
450 * might not reflect the current uCode activity
451 */
452 general = &priv->_3945.statistics.general;
453 dbg = &priv->_3945.statistics.general.dbg;
454 div = &priv->_3945.statistics.general.div;
455 accum_general = &priv->_3945.accum_statistics.general;
456 delta_general = &priv->_3945.delta_statistics.general;
457 max_general = &priv->_3945.max_delta.general;
458 accum_dbg = &priv->_3945.accum_statistics.general.dbg;
459 delta_dbg = &priv->_3945.delta_statistics.general.dbg;
460 max_dbg = &priv->_3945.max_delta.general.dbg;
461 accum_div = &priv->_3945.accum_statistics.general.div;
462 delta_div = &priv->_3945.delta_statistics.general.div;
463 max_div = &priv->_3945.max_delta.general.div;
464 pos += iwl3945_statistics_flag(priv, buf, bufsz);
465 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
466 "acumulative delta max\n",
467 "Statistics_General:");
468 pos += scnprintf(buf + pos, bufsz - pos,
469 " %-30s %10u %10u %10u %10u\n",
470 "burst_check:",
471 le32_to_cpu(dbg->burst_check),
472 accum_dbg->burst_check,
473 delta_dbg->burst_check, max_dbg->burst_check);
474 pos += scnprintf(buf + pos, bufsz - pos,
475 " %-30s %10u %10u %10u %10u\n",
476 "burst_count:",
477 le32_to_cpu(dbg->burst_count),
478 accum_dbg->burst_count,
479 delta_dbg->burst_count, max_dbg->burst_count);
480 pos += scnprintf(buf + pos, bufsz - pos,
481 " %-30s %10u %10u %10u %10u\n",
482 "sleep_time:",
483 le32_to_cpu(general->sleep_time),
484 accum_general->sleep_time,
485 delta_general->sleep_time, max_general->sleep_time);
486 pos += scnprintf(buf + pos, bufsz - pos,
487 " %-30s %10u %10u %10u %10u\n",
488 "slots_out:",
489 le32_to_cpu(general->slots_out),
490 accum_general->slots_out,
491 delta_general->slots_out, max_general->slots_out);
492 pos += scnprintf(buf + pos, bufsz - pos,
493 " %-30s %10u %10u %10u %10u\n",
494 "slots_idle:",
495 le32_to_cpu(general->slots_idle),
496 accum_general->slots_idle,
497 delta_general->slots_idle, max_general->slots_idle);
498 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
499 le32_to_cpu(general->ttl_timestamp));
500 pos += scnprintf(buf + pos, bufsz - pos,
501 " %-30s %10u %10u %10u %10u\n",
502 "tx_on_a:",
503 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
504 delta_div->tx_on_a, max_div->tx_on_a);
505 pos += scnprintf(buf + pos, bufsz - pos,
506 " %-30s %10u %10u %10u %10u\n",
507 "tx_on_b:",
508 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
509 delta_div->tx_on_b, max_div->tx_on_b);
510 pos += scnprintf(buf + pos, bufsz - pos,
511 " %-30s %10u %10u %10u %10u\n",
512 "exec_time:",
513 le32_to_cpu(div->exec_time), accum_div->exec_time,
514 delta_div->exec_time, max_div->exec_time);
515 pos += scnprintf(buf + pos, bufsz - pos,
516 " %-30s %10u %10u %10u %10u\n",
517 "probe_time:",
518 le32_to_cpu(div->probe_time), accum_div->probe_time,
519 delta_div->probe_time, max_div->probe_time);
520 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
521 kfree(buf);
522 return ret;
523}
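All three read handlers in this file follow one debugfs pattern: size a scratch buffer from the statistics structs, build the text with repeated scnprintf() calls, copy it to user space with simple_read_from_buffer(), then free it. A minimal sketch of the same pattern, reduced to a single field (illustrative only, not part of the commit):

static ssize_t example_stats_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	const int bufsz = 256;
	int pos = 0;
	ssize_t ret;
	char *buf;

	if (!iwl_legacy_is_alive(priv))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* scnprintf() is bounded by (bufsz - pos) and returns only the bytes
	 * actually written, so "pos" can never run past the buffer. */
	pos += scnprintf(buf + pos, bufsz - pos, "Statistics Flag(0x%X):\n",
			 le32_to_cpu(priv->_3945.statistics.flag));

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}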
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
new file mode 100644
index 00000000000..8fef4b32b44
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
@@ -0,0 +1,60 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl3945_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count,
40 loff_t *ppos);
41#else
42static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
43 char __user *user_buf, size_t count,
44 loff_t *ppos)
45{
46 return 0;
47}
48static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
49 char __user *user_buf, size_t count,
50 loff_t *ppos)
51{
52 return 0;
53}
54static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
55 char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 return 0;
59}
60#endif
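When CONFIG_IWLWIFI_LEGACY_DEBUGFS is off, the static stubs above turn the reads into no-ops, so callers need no #ifdefs of their own. The handlers are exposed through ordinary debugfs file_operations; the sketch below shows the usual wiring, but the file name, parent dentry, and use of simple_open() are assumptions for illustration and do not come from this commit:

static const struct file_operations iwl3945_ucode_rx_stats_fops = {
	.read	= iwl3945_ucode_rx_stats_read,
	.open	= simple_open,	/* copies inode->i_private to file->private_data */
	.llseek	= default_llseek,
	.owner	= THIS_MODULE,
};

static void example_register_debugfs(struct iwl_priv *priv,
				     struct dentry *parent)
{
	/* "priv" becomes file->private_data in the read handler above. */
	debugfs_create_file("ucode_rx_stats", S_IRUSR, parent, priv,
			    &iwl3945_ucode_rx_stats_fops);
}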
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
new file mode 100644
index 00000000000..836c9919f82
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
@@ -0,0 +1,187 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_3945_fh_h__
64#define __iwl_3945_fh_h__
65
66/************************************/
67/* iwl3945 Flow Handler Definitions */
68/************************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH39_MEM_LOWER_BOUND (0x0800)
75#define FH39_MEM_UPPER_BOUND (0x1000)
76
77#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
78#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
79#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
80#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
81#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
82#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
83
84/* TFDB (Transmit Frame Buffer Descriptor) */
85#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
86 ((_ch) * 2 + (buf)) * 0x28)
87#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
88
89/* CBCC channel is [0,2] */
90#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
91#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
92#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
93
94/* RCSR channel is [0,2] */
95#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
96#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
97#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
98#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
99#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
100
101#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
102
103/* RSSR */
104#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
105#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
106
107/* TCSR */
108#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
109#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
110#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
111#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
112
113/* TSSR */
114#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
115#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
116#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
117
118
119/* DBM */
120
121#define FH39_SRVC_CHNL (6)
122
123#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
124#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
125
126#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
127
128#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
129
130#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
131
132#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
133
134#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
135
136#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
137
138#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
139#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
140
141#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
142#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
143
144#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
145
146#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
147
148#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
149#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
150
151#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
152
153#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
154
155#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
156#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
157
158#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
159
160#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
161#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
162
163#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
164#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
165
166#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
167#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
168
169#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
170 (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
171 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
172
173#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
174
175struct iwl3945_tfd_tb {
176 __le32 addr;
177 __le32 len;
178} __packed;
179
180struct iwl3945_tfd {
181 __le32 control_flags;
182 struct iwl3945_tfd_tb tbs[4];
183 u8 __pad[28];
184} __packed;
185
186
187#endif /* __iwl_3945_fh_h__ */
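The FH39 macros above compose register addresses from a table base inside the 0x0800-0x1000 FH window, a per-channel stride, and a field offset; FH39_RCSR_CONFIG(0), for instance, works out to 0x0800 + 0x400 + 0*0x40 + 0x00 = 0x0C00. A small usage sketch follows; the write helper name is taken from iwl-io.h elsewhere in this commit, and the value written (0, leaving the DMA channel paused) is only an example:

static inline void example_rx_chnl0_pause(struct iwl_priv *priv)
{
	/* FH39_RCSR_CONFIG(0) = FH39_MEM_LOWER_BOUND (0x0800)
	 *                       + 0x400 (RCSR table) + 0 * 0x40 (channel)
	 *                       + 0x00 (config field) = 0x0C00 */
	iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
}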
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
new file mode 100644
index 00000000000..5c3a68d3af1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -0,0 +1,291 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
65 * Please use iwl-commands.h for uCode API definitions.
66 * Please use iwl-3945.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_3945_hw__
70#define __iwl_3945_hw__
71
72#include "iwl-eeprom.h"
73
74/* RSSI to dBm */
75#define IWL39_RSSI_OFFSET 95
76
77/*
78 * EEPROM related constants, enums, and structures.
79 */
80#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
81
82/*
83 * Mapping of a Tx power level, at factory calibration temperature,
84 * to a radio/DSP gain table index.
85 * One for each of 5 "sample" power levels in each band.
86 * v_det is measured at the factory, using the 3945's built-in power amplifier
87 * (PA) output voltage detector. This same detector is used during Tx of
88 * long packets in normal operation to provide feedback as to proper output
89 * level.
90 * Data copied from EEPROM.
91 * DO NOT ALTER THIS STRUCTURE!!!
92 */
93struct iwl3945_eeprom_txpower_sample {
94 u8 gain_index; /* index into power (gain) setup table ... */
95 s8 power; /* ... for this pwr level for this chnl group */
96 u16 v_det; /* PA output voltage */
97} __packed;
98
99/*
100 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
101 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
102 * Tx power setup code interpolates between the 5 "sample" power levels
103 * to determine the nominal setup for a requested power level.
104 * Data copied from EEPROM.
105 * DO NOT ALTER THIS STRUCTURE!!!
106 */
107struct iwl3945_eeprom_txpower_group {
108 struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
109 s32 a, b, c, d, e; /* coefficients for voltage->power
110 * formula (signed) */
111 s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
112 * frequency (signed) */
113 s8 saturation_power; /* highest power possible by h/w in this
114 * band */
115 u8 group_channel; /* "representative" channel # in this band */
116 s16 temperature; /* h/w temperature at factory calib this band
117 * (signed) */
118} __packed;
119
120/*
121 * Temperature-based Tx-power compensation data, not band-specific.
122 * These coefficients are use to modify a/b/c/d/e coeffs based on
123 * difference between current temperature and factory calib temperature.
124 * Data copied from EEPROM.
125 */
126struct iwl3945_eeprom_temperature_corr {
127 u32 Ta;
128 u32 Tb;
129 u32 Tc;
130 u32 Td;
131 u32 Te;
132} __packed;
133
134/*
135 * EEPROM map
136 */
137struct iwl3945_eeprom {
138 u8 reserved0[16];
139 u16 device_id; /* abs.ofs: 16 */
140 u8 reserved1[2];
141 u16 pmc; /* abs.ofs: 20 */
142 u8 reserved2[20];
143 u8 mac_address[6]; /* abs.ofs: 42 */
144 u8 reserved3[58];
145 u16 board_revision; /* abs.ofs: 106 */
146 u8 reserved4[11];
147 u8 board_pba_number[9]; /* abs.ofs: 119 */
148 u8 reserved5[8];
149 u16 version; /* abs.ofs: 136 */
150 u8 sku_cap; /* abs.ofs: 138 */
151 u8 leds_mode; /* abs.ofs: 139 */
152 u16 oem_mode;
153 u16 wowlan_mode; /* abs.ofs: 142 */
154 u16 leds_time_interval; /* abs.ofs: 144 */
155 u8 leds_off_time; /* abs.ofs: 146 */
156 u8 leds_on_time; /* abs.ofs: 147 */
157 u8 almgor_m_version; /* abs.ofs: 148 */
158 u8 antenna_switch_type; /* abs.ofs: 149 */
159 u8 reserved6[42];
160 u8 sku_id[4]; /* abs.ofs: 192 */
161
162/*
163 * Per-channel regulatory data.
164 *
165 * Each channel that *might* be supported by 3945 has a fixed location
166 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
167 * txpower (MSB).
168 *
169 * Entries immediately below are for 20 MHz channel width.
170 *
171 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
172 */
173 u16 band_1_count; /* abs.ofs: 196 */
174 struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
175
176/*
177 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
178 * 5.0 GHz channels 7, 8, 11, 12, 16
179 * (4915-5080MHz) (none of these is ever supported)
180 */
181 u16 band_2_count; /* abs.ofs: 226 */
182 struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
183
184/*
185 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
186 * (5170-5320MHz)
187 */
188 u16 band_3_count; /* abs.ofs: 254 */
189 struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
190
191/*
192 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
193 * (5500-5700MHz)
194 */
195 u16 band_4_count; /* abs.ofs: 280 */
196 struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
197
198/*
199 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
200 * (5725-5825MHz)
201 */
202 u16 band_5_count; /* abs.ofs: 304 */
203 struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
204
205 u8 reserved9[194];
206
207/*
208 * 3945 Txpower calibration data.
209 */
210#define IWL_NUM_TX_CALIB_GROUPS 5
211 struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
212/* abs.ofs: 512 */
213 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
214 u8 reserved16[172]; /* fill out to full 1024 byte block */
215} __packed;
216
217#define IWL3945_EEPROM_IMG_SIZE 1024
218
219/* End of EEPROM */
220
221#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
222#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
223
224/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
225#define IWL39_NUM_QUEUES 5
226#define IWL39_CMD_QUEUE_NUM 4
227
228#define IWL_DEFAULT_TX_RETRY 15
229
230/*********************************************/
231
232#define RFD_SIZE 4
233#define NUM_TFD_CHUNKS 4
234
235#define RX_QUEUE_SIZE 256
236#define RX_QUEUE_MASK 255
237#define RX_QUEUE_SIZE_LOG 8
238
239#define U32_PAD(n) ((4-(n))&0x3)
240
241#define TFD_CTL_COUNT_SET(n) (n << 24)
242#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
243#define TFD_CTL_PAD_SET(n) (n << 28)
244#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
245
246/* Sizes and addresses for instruction and data memory (SRAM) in
247 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
248#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
249#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
250
251#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
252#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
253
254#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
255 IWL39_RTC_INST_LOWER_BOUND)
256#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
257 IWL39_RTC_DATA_LOWER_BOUND)
258
259#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
260#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
261
262/* Size of uCode instruction memory in bootstrap state machine */
263#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
264
265static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
266{
267 return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
268 (addr < IWL39_RTC_DATA_UPPER_BOUND);
269}
270
271/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
272 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
273struct iwl3945_shared {
274 __le32 tx_base_ptr[8];
275} __packed;
276
277static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
278{
279 return le16_to_cpu(rate_n_flags) & 0xFF;
280}
281
282static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
283{
284 return le16_to_cpu(rate_n_flags);
285}
286
287static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
288{
289 return cpu_to_le16((u16)rate|flags);
290}
291#endif
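A short worked example of the TFD control-word helpers above (the buffer count and length are made up for illustration): a TFD carrying two buffer chunks whose last chunk is 14 bytes long needs U32_PAD(14) = (4 - 14) & 0x3 = 2 pad bytes, so:

static inline __le32 example_tfd_control_word(void)
{
	u32 ctl = TFD_CTL_COUNT_SET(2) |	/* two buffer chunks      */
		  TFD_CTL_PAD_SET(U32_PAD(14));	/* 14-byte tail -> 2 pad  */

	/* TFD_CTL_COUNT_GET(ctl) == 2 and TFD_CTL_PAD_GET(ctl) == 2 here;
	 * the word is stored little-endian in iwl3945_tfd.control_flags. */
	return cpu_to_le32(ctl);
}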
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
new file mode 100644
index 00000000000..abd923558d4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
@@ -0,0 +1,64 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-3945.h"
42#include "iwl-core.h"
43#include "iwl-dev.h"
44#include "iwl-3945-led.h"
45
46
47/* Send led command */
48static int iwl3945_send_led_cmd(struct iwl_priv *priv,
49 struct iwl_led_cmd *led_cmd)
50{
51 struct iwl_host_cmd cmd = {
52 .id = REPLY_LEDS_CMD,
53 .len = sizeof(struct iwl_led_cmd),
54 .data = led_cmd,
55 .flags = CMD_ASYNC,
56 .callback = NULL,
57 };
58
59 return iwl_legacy_send_cmd(priv, &cmd);
60}
61
62const struct iwl_led_ops iwl3945_led_ops = {
63 .cmd = iwl3945_send_led_cmd,
64};
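Because iwl3945_send_led_cmd() sets CMD_ASYNC with a NULL callback, iwl_legacy_send_cmd() only queues REPLY_LEDS_CMD and returns without waiting for the uCode to answer. A hedged sketch of a caller; the struct iwl_led_cmd field values and constants shown are assumptions for illustration (the real blink patterns are built in iwl-led.c):

static int example_led_solid_on(struct iwl_priv *priv)
{
	struct iwl_led_cmd led_cmd = {
		.id	  = IWL_LED_LINK,	/* which LED to drive         */
		.on	  = IWL_LED_SOLID,	/* stay on the whole interval */
		.off	  = 0,			/* no off portion             */
		.interval = IWL_DEF_LED_INTRVL,
	};

	/* Returns once the command is queued, not when the LED changes. */
	return iwl3945_led_ops.cmd(priv, &led_cmd);
}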
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
new file mode 100644
index 00000000000..96716276eb0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
@@ -0,0 +1,32 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_3945_led_h__
28#define __iwl_3945_led_h__
29
30extern const struct iwl_led_ops iwl3945_led_ops;
31
32#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
new file mode 100644
index 00000000000..164bcae821f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -0,0 +1,997 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/skbuff.h>
30#include <linux/slab.h>
31#include <linux/wireless.h>
32#include <net/mac80211.h>
33
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/delay.h>
37
38#include <linux/workqueue.h>
39
40#include "iwl-commands.h"
41#include "iwl-3945.h"
42#include "iwl-sta.h"
43
44#define RS_NAME "iwl-3945-rs"
45
46static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
47 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
48};
49
50static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
51 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
52};
53
54static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
55 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
56};
57
58static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
59 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
60};
61
62struct iwl3945_tpt_entry {
63 s8 min_rssi;
64 u8 index;
65};
66
67static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
68 {-60, IWL_RATE_54M_INDEX},
69 {-64, IWL_RATE_48M_INDEX},
70 {-72, IWL_RATE_36M_INDEX},
71 {-80, IWL_RATE_24M_INDEX},
72 {-84, IWL_RATE_18M_INDEX},
73 {-85, IWL_RATE_12M_INDEX},
74 {-87, IWL_RATE_9M_INDEX},
75 {-89, IWL_RATE_6M_INDEX}
76};
77
78static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
79 {-60, IWL_RATE_54M_INDEX},
80 {-64, IWL_RATE_48M_INDEX},
81 {-68, IWL_RATE_36M_INDEX},
82 {-80, IWL_RATE_24M_INDEX},
83 {-84, IWL_RATE_18M_INDEX},
84 {-85, IWL_RATE_12M_INDEX},
85 {-86, IWL_RATE_11M_INDEX},
86 {-88, IWL_RATE_5M_INDEX},
87 {-90, IWL_RATE_2M_INDEX},
88 {-92, IWL_RATE_1M_INDEX}
89};
90
91#define IWL_RATE_MAX_WINDOW 62
92#define IWL_RATE_FLUSH (3*HZ)
93#define IWL_RATE_WIN_FLUSH (HZ/2)
94#define IWL39_RATE_HIGH_TH 11520
95#define IWL_SUCCESS_UP_TH 8960
96#define IWL_SUCCESS_DOWN_TH 10880
97#define IWL_RATE_MIN_FAILURE_TH 6
98#define IWL_RATE_MIN_SUCCESS_TH 8
99#define IWL_RATE_DECREASE_TH 1920
100#define IWL_RATE_RETRY_TH 15
101
102static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
103{
104 u32 index = 0;
105 u32 table_size = 0;
106 struct iwl3945_tpt_entry *tpt_table = NULL;
107
108 if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
109 rssi = IWL_MIN_RSSI_VAL;
110
111 switch (band) {
112 case IEEE80211_BAND_2GHZ:
113 tpt_table = iwl3945_tpt_table_g;
114 table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
115 break;
116
117 case IEEE80211_BAND_5GHZ:
118 tpt_table = iwl3945_tpt_table_a;
119 table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
120 break;
121
122 default:
123 BUG();
124 break;
125 }
126
127 while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
128 index++;
129
130 index = min(index, (table_size - 1));
131
132 return tpt_table[index].index;
133}
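/*
 * Worked example (illustrative; not part of the original driver source):
 * for rssi = -70 dBm on the 2.4 GHz table above, the loop skips the
 * -60/-64/-68 entries and stops at {-80, IWL_RATE_24M_INDEX}, so rate
 * scaling starts from the 24 Mbps entry. An out-of-range rssi is clamped
 * to IWL_MIN_RSSI_VAL first, which selects the table's lowest rate.
 */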
134
135static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
136{
137 window->data = 0;
138 window->success_counter = 0;
139 window->success_ratio = -1;
140 window->counter = 0;
141 window->average_tpt = IWL_INVALID_VALUE;
142 window->stamp = 0;
143}
144
145/**
146 * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
147 *
148 * Returns the number of windows that have gathered data but were
149 * not flushed. If there were any that were not flushed, then
150 * reschedule the rate flushing routine.
151 */
152static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
153{
154 int unflushed = 0;
155 int i;
156 unsigned long flags;
157 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
158
159 /*
160 * For each rate, if we have collected data on that rate
161 * and it has been more than IWL_RATE_WIN_FLUSH
162 * since we flushed, clear out the gathered statistics
163 */
164 for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
165 if (!rs_sta->win[i].counter)
166 continue;
167
168 spin_lock_irqsave(&rs_sta->lock, flags);
169 if (time_after(jiffies, rs_sta->win[i].stamp +
170 IWL_RATE_WIN_FLUSH)) {
171 IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
172 "index %d\n",
173 rs_sta->win[i].counter, i);
174 iwl3945_clear_window(&rs_sta->win[i]);
175 } else
176 unflushed++;
177 spin_unlock_irqrestore(&rs_sta->lock, flags);
178 }
179
180 return unflushed;
181}
182
183#define IWL_RATE_FLUSH_MAX 5000 /* msec */
184#define IWL_RATE_FLUSH_MIN 50 /* msec */
185#define IWL_AVERAGE_PACKETS 1500
186
187static void iwl3945_bg_rate_scale_flush(unsigned long data)
188{
189 struct iwl3945_rs_sta *rs_sta = (void *)data;
190 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
191 int unflushed = 0;
192 unsigned long flags;
193 u32 packet_count, duration, pps;
194
195 IWL_DEBUG_RATE(priv, "enter\n");
196
197 unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
198
199 spin_lock_irqsave(&rs_sta->lock, flags);
200
201 /* Number of packets Rx'd since last time this timer ran */
202 packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
203
204 rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
205
206 if (unflushed) {
207 duration =
208 jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
209
210 IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
211 packet_count, duration);
212
213 /* Determine packets per second */
214 if (duration)
215 pps = (packet_count * 1000) / duration;
216 else
217 pps = 0;
218
219 if (pps) {
220 duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
221 if (duration < IWL_RATE_FLUSH_MIN)
222 duration = IWL_RATE_FLUSH_MIN;
223 else if (duration > IWL_RATE_FLUSH_MAX)
224 duration = IWL_RATE_FLUSH_MAX;
225 } else
226 duration = IWL_RATE_FLUSH_MAX;
227
228 rs_sta->flush_time = msecs_to_jiffies(duration);
229
230 IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
231 duration, packet_count);
232
233 mod_timer(&rs_sta->rate_scale_flush, jiffies +
234 rs_sta->flush_time);
235
236 rs_sta->last_partial_flush = jiffies;
237 } else {
238 rs_sta->flush_time = IWL_RATE_FLUSH;
239 rs_sta->flush_pending = 0;
240 }
241 /* If there weren't any unflushed entries, we don't schedule the timer
242 * to run again */
243
244 rs_sta->last_flush = jiffies;
245
246 spin_unlock_irqrestore(&rs_sta->lock, flags);
247
248 IWL_DEBUG_RATE(priv, "leave\n");
249}
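/*
 * Worked example of the flush-interval arithmetic above (illustrative
 * numbers, not from the driver): 300 packets over 200 ms gives
 * pps = 300 * 1000 / 200 = 1500, so the next interval becomes
 * IWL_AVERAGE_PACKETS * 1000 / pps = 1500 * 1000 / 1500 = 1000 ms, which
 * already lies inside the [IWL_RATE_FLUSH_MIN, IWL_RATE_FLUSH_MAX] =
 * [50, 5000] ms clamp. A busy link therefore re-flushes roughly every
 * IWL_AVERAGE_PACKETS packets; with nothing left to flush the timer is
 * not rearmed and flush_time falls back to IWL_RATE_FLUSH.
 */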
250
251/**
252 * iwl3945_collect_tx_data - Update the success/failure sliding window
253 *
254 * We keep a sliding window of the last 64 packets transmitted
255 * at this rate. window->data contains the bitmask of successful
256 * packets.
257 */
258static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
259 struct iwl3945_rate_scale_data *window,
260 int success, int retries, int index)
261{
262 unsigned long flags;
263 s32 fail_count;
264 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
265
266 if (!retries) {
267 IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
268 return;
269 }
270
271 spin_lock_irqsave(&rs_sta->lock, flags);
272
273 /*
274 * Keep track of only the latest 62 tx frame attempts in this rate's
275 * history window; anything older isn't really relevant any more.
276 * If we have filled up the sliding window, drop the oldest attempt;
277 * if the oldest attempt (highest bit in bitmap) shows "success",
278 * subtract "1" from the success counter (this is the main reason
279 * we keep these bitmaps!).
280 * */
281 while (retries > 0) {
282 if (window->counter >= IWL_RATE_MAX_WINDOW) {
283
284 /* remove earliest */
285 window->counter = IWL_RATE_MAX_WINDOW - 1;
286
287 if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
288 window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
289 window->success_counter--;
290 }
291 }
292
293 /* Increment frames-attempted counter */
294 window->counter++;
295
296 /* Shift bitmap by one frame (throw away oldest history),
297 * OR in "1", and increment "success" if this
298 * frame was successful. */
299 window->data <<= 1;
300 if (success > 0) {
301 window->success_counter++;
302 window->data |= 0x1;
303 success--;
304 }
305
306 retries--;
307 }
308
309 /* Calculate current success ratio, avoid divide-by-0! */
310 if (window->counter > 0)
311 window->success_ratio = 128 * (100 * window->success_counter)
312 / window->counter;
313 else
314 window->success_ratio = IWL_INVALID_VALUE;
315
316 fail_count = window->counter - window->success_counter;
317
318 /* Calculate average throughput, if we have enough history. */
319 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
320 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
321 window->average_tpt = ((window->success_ratio *
322 rs_sta->expected_tpt[index] + 64) / 128);
323 else
324 window->average_tpt = IWL_INVALID_VALUE;
325
326 /* Tag this window as having been updated */
327 window->stamp = jiffies;
328
329 spin_unlock_irqrestore(&rs_sta->lock, flags);
330
331}
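/*
 * Worked example of the window math above (illustration only): with
 * counter = 62 attempts and success_counter = 31, success_ratio =
 * 128 * (100 * 31) / 62 = 6400, i.e. 50% scaled by 128.  Assuming a
 * hypothetical expected_tpt[index] of 40, average_tpt =
 * (6400 * 40 + 64) / 128 = 2000, i.e. 50% of expected throughput on the
 * 100 * expected_tpt scale used by the comparisons in this file.
 */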
332
333/*
334 * Called after adding a new station to initialize rate scaling
335 */
336void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
337{
338 struct ieee80211_hw *hw = priv->hw;
339 struct ieee80211_conf *conf = &priv->hw->conf;
340 struct iwl3945_sta_priv *psta;
341 struct iwl3945_rs_sta *rs_sta;
342 struct ieee80211_supported_band *sband;
343 int i;
344
345 IWL_DEBUG_INFO(priv, "enter\n");
346 if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
347 goto out;
348
349 psta = (struct iwl3945_sta_priv *) sta->drv_priv;
350 rs_sta = &psta->rs_sta;
351 sband = hw->wiphy->bands[conf->channel->band];
352
353 rs_sta->priv = priv;
354
355 rs_sta->start_rate = IWL_RATE_INVALID;
356
357 /* default to just 802.11b */
358 rs_sta->expected_tpt = iwl3945_expected_tpt_b;
359
360 rs_sta->last_partial_flush = jiffies;
361 rs_sta->last_flush = jiffies;
362 rs_sta->flush_time = IWL_RATE_FLUSH;
363 rs_sta->last_tx_packets = 0;
364
365 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
366 rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
367
368 for (i = 0; i < IWL_RATE_COUNT_3945; i++)
369 iwl3945_clear_window(&rs_sta->win[i]);
370
371 /* TODO: what is a good starting rate for STA? About middle? Maybe not
372 * the lowest or the highest rate.. Could consider using RSSI from
373 * previous packets? Need to have IEEE 802.1X auth succeed immediately
374 * after assoc.. */
375
376 for (i = sband->n_bitrates - 1; i >= 0; i--) {
377 if (sta->supp_rates[sband->band] & (1 << i)) {
378 rs_sta->last_txrate_idx = i;
379 break;
380 }
381 }
382
383 priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
384	/* For the 5 GHz band, rates start at IWL_FIRST_OFDM_RATE */
385 if (sband->band == IEEE80211_BAND_5GHZ) {
386 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
387 priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
388 IWL_FIRST_OFDM_RATE;
389 }
390
391out:
392 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
393
394 IWL_DEBUG_INFO(priv, "leave\n");
395}
396
397static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
398{
399 return hw->priv;
400}
401
402/* rate scale requires free function to be implemented */
403static void iwl3945_rs_free(void *priv)
404{
405 return;
406}
407
408static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
409{
410 struct iwl3945_rs_sta *rs_sta;
411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
412 struct iwl_priv *priv __maybe_unused = iwl_priv;
413
414 IWL_DEBUG_RATE(priv, "enter\n");
415
416 rs_sta = &psta->rs_sta;
417
418 spin_lock_init(&rs_sta->lock);
419 init_timer(&rs_sta->rate_scale_flush);
420
421 IWL_DEBUG_RATE(priv, "leave\n");
422
423 return rs_sta;
424}
425
426static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
427 void *priv_sta)
428{
429 struct iwl3945_rs_sta *rs_sta = priv_sta;
430
431 /*
432	 * Be careful not to use any members of iwl3945_rs_sta (like trying
433	 * to use rs_sta->priv to print out debugging) since it may not be fully
434 * initialized at this point.
435 */
436 del_timer_sync(&rs_sta->rate_scale_flush);
437}
438
439
440/**
441 * iwl3945_rs_tx_status - Update rate control values based on Tx results
442 *
443 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
444 * the hardware for each rate.
445 */
446static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
447 struct ieee80211_sta *sta, void *priv_sta,
448 struct sk_buff *skb)
449{
450 s8 retries = 0, current_count;
451 int scale_rate_index, first_index, last_index;
452 unsigned long flags;
453 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
454 struct iwl3945_rs_sta *rs_sta = priv_sta;
455 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
456
457 IWL_DEBUG_RATE(priv, "enter\n");
458
459 retries = info->status.rates[0].count;
460 /* Sanity Check for retries */
461 if (retries > IWL_RATE_RETRY_TH)
462 retries = IWL_RATE_RETRY_TH;
463
464 first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
465 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
466 IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
467 return;
468 }
469
470 if (!priv_sta) {
471 IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
472 return;
473 }
474
475 /* Treat uninitialized rate scaling data same as non-existing. */
476 if (!rs_sta->priv) {
477 IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
478 return;
479 }
480
481
482 rs_sta->tx_packets++;
483
484 scale_rate_index = first_index;
485 last_index = first_index;
486
487 /*
488 * Update the window for each rate. We determine which rates
489 * were Tx'd based on the total number of retries vs. the number
490	 * of retries allotted to each rate -- currently the single priv
491	 * value 'retry_rate' rather than a rate-specific count.
492 *
493 * On exit from this while loop last_index indicates the rate
494 * at which the frame was finally transmitted (or failed if no
495 * ACK)
496 */
497 while (retries > 1) {
498 if ((retries - 1) < priv->retry_rate) {
499 current_count = (retries - 1);
500 last_index = scale_rate_index;
501 } else {
502 current_count = priv->retry_rate;
503 last_index = iwl3945_rs_next_rate(priv,
504 scale_rate_index);
505 }
506
507 /* Update this rate accounting for as many retries
508 * as was used for it (per current_count) */
509 iwl3945_collect_tx_data(rs_sta,
510 &rs_sta->win[scale_rate_index],
511 0, current_count, scale_rate_index);
512 IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
513 scale_rate_index, current_count);
514
515 retries -= current_count;
516
517 scale_rate_index = last_index;
518 }
519
520
521 /* Update the last index window with success/failure based on ACK */
522 IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
523 last_index,
524 (info->flags & IEEE80211_TX_STAT_ACK) ?
525 "success" : "failure");
526 iwl3945_collect_tx_data(rs_sta,
527 &rs_sta->win[last_index],
528 info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
529
530 /* We updated the rate scale window -- if its been more than
531 * flush_time since the last run, schedule the flush
532 * again */
533 spin_lock_irqsave(&rs_sta->lock, flags);
534
535 if (!rs_sta->flush_pending &&
536 time_after(jiffies, rs_sta->last_flush +
537 rs_sta->flush_time)) {
538
539 rs_sta->last_partial_flush = jiffies;
540 rs_sta->flush_pending = 1;
541 mod_timer(&rs_sta->rate_scale_flush,
542 jiffies + rs_sta->flush_time);
543 }
544
545 spin_unlock_irqrestore(&rs_sta->lock, flags);
546
547 IWL_DEBUG_RATE(priv, "leave\n");
548}
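/*
 * Example of the retry accounting above (illustration only): assuming
 * priv->retry_rate == 2 and a frame reported with retries == 5 starting
 * at rate R0, the loop records 2 failures at R0, then 2 failures at the
 * next fallback rate R1, leaving retries == 1; the final attempt at R2
 * is then recorded once with the real ACK outcome.
 */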
549
550static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
551 u8 index, u16 rate_mask, enum ieee80211_band band)
552{
553 u8 high = IWL_RATE_INVALID;
554 u8 low = IWL_RATE_INVALID;
555 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
556
557 /* 802.11A walks to the next literal adjacent rate in
558 * the rate table */
559 if (unlikely(band == IEEE80211_BAND_5GHZ)) {
560 int i;
561 u32 mask;
562
563 /* Find the previous rate that is in the rate mask */
564 i = index - 1;
565 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
566 if (rate_mask & mask) {
567 low = i;
568 break;
569 }
570 }
571
572 /* Find the next rate that is in the rate mask */
573 i = index + 1;
574 for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
575 i++, mask <<= 1) {
576 if (rate_mask & mask) {
577 high = i;
578 break;
579 }
580 }
581
582 return (high << 8) | low;
583 }
584
585 low = index;
586 while (low != IWL_RATE_INVALID) {
587 if (rs_sta->tgg)
588 low = iwl3945_rates[low].prev_rs_tgg;
589 else
590 low = iwl3945_rates[low].prev_rs;
591 if (low == IWL_RATE_INVALID)
592 break;
593 if (rate_mask & (1 << low))
594 break;
595 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
596 }
597
598 high = index;
599 while (high != IWL_RATE_INVALID) {
600 if (rs_sta->tgg)
601 high = iwl3945_rates[high].next_rs_tgg;
602 else
603 high = iwl3945_rates[high].next_rs;
604 if (high == IWL_RATE_INVALID)
605 break;
606 if (rate_mask & (1 << high))
607 break;
608 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
609 }
610
611 return (high << 8) | low;
612}
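/*
 * Note: the return value packs both neighbours into one u16; callers
 * recover them with low = ret & 0xff and high = (ret >> 8) & 0xff, and
 * either byte may be IWL_RATE_INVALID when no usable neighbour exists
 * in the rate mask.
 */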
613
614/**
615 * iwl3945_rs_get_rate - find the rate for the requested packet
616 *
617 * Fills in the rate index to use (info->control.rates[0].idx) for the packet.
618 *
619 * The rate control algorithm has no internal mapping between hw_mode's
620 * rate ordering and the rate ordering used by the rate control algorithm.
621 *
622 * The rate control algorithm uses a single table of rates that goes across
623 * the entire A/B/G spectrum vs. being limited to just one particular
624 * hw_mode.
625 *
626 * As such, we can't convert the index obtained below into the hw_mode's
627 * rate table and must reference the driver allocated rate table
628 *
629 */
630static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
631 void *priv_sta, struct ieee80211_tx_rate_control *txrc)
632{
633 struct ieee80211_supported_band *sband = txrc->sband;
634 struct sk_buff *skb = txrc->skb;
635 u8 low = IWL_RATE_INVALID;
636 u8 high = IWL_RATE_INVALID;
637 u16 high_low;
638 int index;
639 struct iwl3945_rs_sta *rs_sta = priv_sta;
640 struct iwl3945_rate_scale_data *window = NULL;
641 int current_tpt = IWL_INVALID_VALUE;
642 int low_tpt = IWL_INVALID_VALUE;
643 int high_tpt = IWL_INVALID_VALUE;
644 u32 fail_count;
645 s8 scale_action = 0;
646 unsigned long flags;
647 u16 rate_mask;
648 s8 max_rate_idx = -1;
649 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
650 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
651
652 IWL_DEBUG_RATE(priv, "enter\n");
653
654 /* Treat uninitialized rate scaling data same as non-existing. */
655 if (rs_sta && !rs_sta->priv) {
656 IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
657 priv_sta = NULL;
658 }
659
660 if (rate_control_send_low(sta, priv_sta, txrc))
661 return;
662
663 rate_mask = sta->supp_rates[sband->band];
664
665 /* get user max rate if set */
666 max_rate_idx = txrc->max_rate_idx;
667 if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
668 max_rate_idx += IWL_FIRST_OFDM_RATE;
669 if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
670 max_rate_idx = -1;
671
672 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
673
674 if (sband->band == IEEE80211_BAND_5GHZ)
675 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
676
677 spin_lock_irqsave(&rs_sta->lock, flags);
678
679	/* for a recent association, choose the best starting rate
680	 * based on the RSSI value
681	 */
682 if (rs_sta->start_rate != IWL_RATE_INVALID) {
683 if (rs_sta->start_rate < index &&
684 (rate_mask & (1 << rs_sta->start_rate)))
685 index = rs_sta->start_rate;
686 rs_sta->start_rate = IWL_RATE_INVALID;
687 }
688
689	/* enforce the user-configured max rate, if one is set */
690 if ((max_rate_idx != -1) && (max_rate_idx < index)) {
691 if (rate_mask & (1 << max_rate_idx))
692 index = max_rate_idx;
693 }
694
695 window = &(rs_sta->win[index]);
696
697 fail_count = window->counter - window->success_counter;
698
699 if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
700 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
701 spin_unlock_irqrestore(&rs_sta->lock, flags);
702
703 IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
704 "counter: %d, success_counter: %d, "
705 "expected_tpt is %sNULL\n",
706 index,
707 window->counter,
708 window->success_counter,
709 rs_sta->expected_tpt ? "not " : "");
710
711 /* Can't calculate this yet; not enough history */
712 window->average_tpt = IWL_INVALID_VALUE;
713 goto out;
714
715 }
716
717 current_tpt = window->average_tpt;
718
719 high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
720 sband->band);
721 low = high_low & 0xff;
722 high = (high_low >> 8) & 0xff;
723
724	/* If the user set a max rate, don't allow rates above that constraint */
725 if ((max_rate_idx != -1) && (max_rate_idx < high))
726 high = IWL_RATE_INVALID;
727
728 /* Collect Measured throughputs of adjacent rates */
729 if (low != IWL_RATE_INVALID)
730 low_tpt = rs_sta->win[low].average_tpt;
731
732 if (high != IWL_RATE_INVALID)
733 high_tpt = rs_sta->win[high].average_tpt;
734
735 spin_unlock_irqrestore(&rs_sta->lock, flags);
736
737 scale_action = 0;
738
739	/* Low success ratio; need to drop the rate */
740 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
741 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
742 scale_action = -1;
743 /* No throughput measured yet for adjacent rates,
744	 * try to increase */
745 } else if ((low_tpt == IWL_INVALID_VALUE) &&
746 (high_tpt == IWL_INVALID_VALUE)) {
747
748 if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
749 scale_action = 1;
750 else if (low != IWL_RATE_INVALID)
751 scale_action = 0;
752
753 /* Both adjacent throughputs are measured, but neither one has
754 * better throughput; we're using the best rate, don't change
755 * it! */
756 } else if ((low_tpt != IWL_INVALID_VALUE) &&
757 (high_tpt != IWL_INVALID_VALUE) &&
758 (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
759
760 IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
761 "current_tpt [%d]\n",
762 low_tpt, high_tpt, current_tpt);
763 scale_action = 0;
764
765 /* At least one of the rates has better throughput */
766 } else {
767 if (high_tpt != IWL_INVALID_VALUE) {
768
769			/* Higher rate has better throughput; increase
770			 * rate */
771 if (high_tpt > current_tpt &&
772 window->success_ratio >= IWL_RATE_INCREASE_TH)
773 scale_action = 1;
774 else {
775 IWL_DEBUG_RATE(priv,
776					"not increasing rate: higher rate not better or success ratio too low\n");
777 scale_action = 0;
778 }
779 } else if (low_tpt != IWL_INVALID_VALUE) {
780 if (low_tpt > current_tpt) {
781 IWL_DEBUG_RATE(priv,
782 "decrease rate because of low tpt\n");
783 scale_action = -1;
784 } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
785				/* Lower rate is not better; our success
786				 * ratio is good enough, try a higher rate */
787 scale_action = 1;
788 }
789 }
790 }
791
792 /* Sanity check; asked for decrease, but success rate or throughput
793 * has been good at old rate. Don't change it. */
794 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
795 ((window->success_ratio > IWL_RATE_HIGH_TH) ||
796 (current_tpt > (100 * rs_sta->expected_tpt[low]))))
797 scale_action = 0;
798
799 switch (scale_action) {
800 case -1:
801
802		/* Decrease rate */
803 if (low != IWL_RATE_INVALID)
804 index = low;
805 break;
806
807 case 1:
808 /* Increase rate */
809 if (high != IWL_RATE_INVALID)
810 index = high;
811
812 break;
813
814 case 0:
815 default:
816 /* No change */
817 break;
818 }
819
820 IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
821 index, scale_action, low, high);
822
823 out:
824
825 if (sband->band == IEEE80211_BAND_5GHZ) {
826 if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
827 index = IWL_FIRST_OFDM_RATE;
828 rs_sta->last_txrate_idx = index;
829 info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
830 } else {
831 rs_sta->last_txrate_idx = index;
832 info->control.rates[0].idx = rs_sta->last_txrate_idx;
833 }
834
835 IWL_DEBUG_RATE(priv, "leave: %d\n", index);
836}
837
838#ifdef CONFIG_MAC80211_DEBUGFS
839static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
840{
841 file->private_data = inode->i_private;
842 return 0;
843}
844
845static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
846 char __user *user_buf,
847 size_t count, loff_t *ppos)
848{
849 char *buff;
850 int desc = 0;
851 int j;
852 ssize_t ret;
853 struct iwl3945_rs_sta *lq_sta = file->private_data;
854
855 buff = kmalloc(1024, GFP_KERNEL);
856 if (!buff)
857 return -ENOMEM;
858
859 desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
860 "rate=0x%X flush time %d\n",
861 lq_sta->tx_packets,
862 lq_sta->last_txrate_idx,
863 lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
864 for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
865 desc += sprintf(buff+desc,
866 "counter=%d success=%d %%=%d\n",
867 lq_sta->win[j].counter,
868 lq_sta->win[j].success_counter,
869 lq_sta->win[j].success_ratio);
870 }
871 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
872 kfree(buff);
873 return ret;
874}
875
876static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
877 .read = iwl3945_sta_dbgfs_stats_table_read,
878 .open = iwl3945_open_file_generic,
879 .llseek = default_llseek,
880};
881
882static void iwl3945_add_debugfs(void *priv, void *priv_sta,
883 struct dentry *dir)
884{
885 struct iwl3945_rs_sta *lq_sta = priv_sta;
886
887 lq_sta->rs_sta_dbgfs_stats_table_file =
888 debugfs_create_file("rate_stats_table", 0600, dir,
889 lq_sta, &rs_sta_dbgfs_stats_table_ops);
890
891}
892
893static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
894{
895 struct iwl3945_rs_sta *lq_sta = priv_sta;
896 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
897}
898#endif
899
900/*
901 * Initialization of rate scaling information is done by driver after
902 * the station is added. Since mac80211 calls this function before a
903 * station is added we ignore it.
904 */
905static void iwl3945_rs_rate_init_stub(void *priv_r,
906 struct ieee80211_supported_band *sband,
907 struct ieee80211_sta *sta, void *priv_sta)
908{
909}
910
911static struct rate_control_ops rs_ops = {
912 .module = NULL,
913 .name = RS_NAME,
914 .tx_status = iwl3945_rs_tx_status,
915 .get_rate = iwl3945_rs_get_rate,
916 .rate_init = iwl3945_rs_rate_init_stub,
917 .alloc = iwl3945_rs_alloc,
918 .free = iwl3945_rs_free,
919 .alloc_sta = iwl3945_rs_alloc_sta,
920 .free_sta = iwl3945_rs_free_sta,
921#ifdef CONFIG_MAC80211_DEBUGFS
922 .add_sta_debugfs = iwl3945_add_debugfs,
923 .remove_sta_debugfs = iwl3945_remove_debugfs,
924#endif
925
926};
927void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
928{
929 struct iwl_priv *priv = hw->priv;
930 s32 rssi = 0;
931 unsigned long flags;
932 struct iwl3945_rs_sta *rs_sta;
933 struct ieee80211_sta *sta;
934 struct iwl3945_sta_priv *psta;
935
936 IWL_DEBUG_RATE(priv, "enter\n");
937
938 rcu_read_lock();
939
940 sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
941 priv->stations[sta_id].sta.sta.addr);
942 if (!sta) {
943 IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
944 rcu_read_unlock();
945 return;
946 }
947
948 psta = (void *) sta->drv_priv;
949 rs_sta = &psta->rs_sta;
950
951 spin_lock_irqsave(&rs_sta->lock, flags);
952
953 rs_sta->tgg = 0;
954 switch (priv->band) {
955 case IEEE80211_BAND_2GHZ:
956 /* TODO: this always does G, not a regression */
957 if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
958 RXON_FLG_TGG_PROTECT_MSK) {
959 rs_sta->tgg = 1;
960 rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
961 } else
962 rs_sta->expected_tpt = iwl3945_expected_tpt_g;
963 break;
964
965 case IEEE80211_BAND_5GHZ:
966 rs_sta->expected_tpt = iwl3945_expected_tpt_a;
967 break;
968 case IEEE80211_NUM_BANDS:
969 BUG();
970 break;
971 }
972
973 spin_unlock_irqrestore(&rs_sta->lock, flags);
974
975 rssi = priv->_3945.last_rx_rssi;
976 if (rssi == 0)
977 rssi = IWL_MIN_RSSI_VAL;
978
979 IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
980
981 rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
982
983 IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
984 "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
985 iwl3945_rates[rs_sta->start_rate].plcp);
986 rcu_read_unlock();
987}
988
989int iwl3945_rate_control_register(void)
990{
991 return ieee80211_rate_control_register(&rs_ops);
992}
993
994void iwl3945_rate_control_unregister(void)
995{
996 ieee80211_rate_control_unregister(&rs_ops);
997}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
new file mode 100644
index 00000000000..73fe3cdf796
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -0,0 +1,2742 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/sched.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/wireless.h>
38#include <linux/firmware.h>
39#include <linux/etherdevice.h>
40#include <asm/unaligned.h>
41#include <net/mac80211.h>
42
43#include "iwl-fh.h"
44#include "iwl-3945-fh.h"
45#include "iwl-commands.h"
46#include "iwl-sta.h"
47#include "iwl-3945.h"
48#include "iwl-eeprom.h"
49#include "iwl-core.h"
50#include "iwl-helpers.h"
51#include "iwl-led.h"
52#include "iwl-3945-led.h"
53#include "iwl-3945-debugfs.h"
54
55#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
56 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
57 IWL_RATE_##r##M_IEEE, \
58 IWL_RATE_##ip##M_INDEX, \
59 IWL_RATE_##in##M_INDEX, \
60 IWL_RATE_##rp##M_INDEX, \
61 IWL_RATE_##rn##M_INDEX, \
62 IWL_RATE_##pp##M_INDEX, \
63 IWL_RATE_##np##M_INDEX, \
64 IWL_RATE_##r##M_INDEX_TABLE, \
65 IWL_RATE_##ip##M_INDEX_TABLE }
66
67/*
68 * Parameter order:
69 * rate, prev IEEE rate, next IEEE rate, prev rate-scale rate, next rate-scale rate, prev TGG rate, next TGG rate
70 *
71 * If there isn't a valid next or previous rate then INV is used which
72 * maps to IWL_RATE_INVALID
73 *
74 */
75const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
76 IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
77 IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
78	IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),    /* 5.5mbps */
79 IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
80 IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
81 IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
82 IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
83 IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
84 IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
85 IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
86 IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
87 IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
88};
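/*
 * For illustration, the 11 Mbps entry above links 11M back to 5.5M and
 * forward to 12M for plain rate scaling, but steps up to 18M when the
 * TGG (802.11g protection) linkage is used; the actual index values come
 * from the IWL_RATE_*M_INDEX definitions in the driver headers.
 */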
89
90static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
91{
92 u8 rate = iwl3945_rates[rate_index].prev_ieee;
93
94 if (rate == IWL_RATE_INVALID)
95 rate = rate_index;
96 return rate;
97}
98
99/* 1 = enable the iwl3945_disable_events() function */
100#define IWL_EVT_DISABLE (0)
101#define IWL_EVT_DISABLE_SIZE (1532/32)
102
103/**
104 * iwl3945_disable_events - Disable selected events in uCode event log
105 *
106 * Disable an event by writing "1"s into "disable"
107 * bitmap in SRAM. Bit position corresponds to Event # (id/type).
108 * Default values of 0 enable uCode events to be logged.
109 * Use for only special debugging. This function is just a placeholder as-is,
110 * Use only for special debugging. This function is just a placeholder as-is;
111 * ... and set IWL_EVT_DISABLE to 1. */
112void iwl3945_disable_events(struct iwl_priv *priv)
113{
114 int i;
115 u32 base; /* SRAM address of event log header */
116 u32 disable_ptr; /* SRAM address of event-disable bitmap array */
117 u32 array_size; /* # of u32 entries in array */
118 static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
119 0x00000000, /* 31 - 0 Event id numbers */
120 0x00000000, /* 63 - 32 */
121 0x00000000, /* 95 - 64 */
122 0x00000000, /* 127 - 96 */
123 0x00000000, /* 159 - 128 */
124 0x00000000, /* 191 - 160 */
125 0x00000000, /* 223 - 192 */
126 0x00000000, /* 255 - 224 */
127 0x00000000, /* 287 - 256 */
128 0x00000000, /* 319 - 288 */
129 0x00000000, /* 351 - 320 */
130 0x00000000, /* 383 - 352 */
131 0x00000000, /* 415 - 384 */
132 0x00000000, /* 447 - 416 */
133 0x00000000, /* 479 - 448 */
134 0x00000000, /* 511 - 480 */
135 0x00000000, /* 543 - 512 */
136 0x00000000, /* 575 - 544 */
137 0x00000000, /* 607 - 576 */
138 0x00000000, /* 639 - 608 */
139 0x00000000, /* 671 - 640 */
140 0x00000000, /* 703 - 672 */
141 0x00000000, /* 735 - 704 */
142 0x00000000, /* 767 - 736 */
143 0x00000000, /* 799 - 768 */
144 0x00000000, /* 831 - 800 */
145 0x00000000, /* 863 - 832 */
146 0x00000000, /* 895 - 864 */
147 0x00000000, /* 927 - 896 */
148 0x00000000, /* 959 - 928 */
149 0x00000000, /* 991 - 960 */
150 0x00000000, /* 1023 - 992 */
151 0x00000000, /* 1055 - 1024 */
152 0x00000000, /* 1087 - 1056 */
153 0x00000000, /* 1119 - 1088 */
154 0x00000000, /* 1151 - 1120 */
155 0x00000000, /* 1183 - 1152 */
156 0x00000000, /* 1215 - 1184 */
157 0x00000000, /* 1247 - 1216 */
158 0x00000000, /* 1279 - 1248 */
159 0x00000000, /* 1311 - 1280 */
160 0x00000000, /* 1343 - 1312 */
161 0x00000000, /* 1375 - 1344 */
162 0x00000000, /* 1407 - 1376 */
163 0x00000000, /* 1439 - 1408 */
164 0x00000000, /* 1471 - 1440 */
165 0x00000000, /* 1503 - 1472 */
166 };
167
168 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
169 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
170 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
171 return;
172 }
173
174 disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
175 array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
176
177 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
178 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
179 disable_ptr);
180 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
181 iwl_legacy_write_targ_mem(priv,
182 disable_ptr + (i * sizeof(u32)),
183 evt_disable[i]);
184
185 } else {
186 IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
187 IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
188 IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n",
189 disable_ptr, array_size);
190 }
191
192}
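/*
 * Example (illustration only): to disable event id 37 one would set bit
 * (37 % 32) == 5 in evt_disable[37 / 32] == evt_disable[1], i.e.
 * evt_disable[1] = 0x00000020, and build with IWL_EVT_DISABLE set to 1.
 */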
193
194static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
195{
196 int idx;
197
198 for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
199 if (iwl3945_rates[idx].plcp == plcp)
200 return idx;
201 return -1;
202}
203
204#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
205#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
206
207static const char *iwl3945_get_tx_fail_reason(u32 status)
208{
209 switch (status & TX_STATUS_MSK) {
210 case TX_3945_STATUS_SUCCESS:
211 return "SUCCESS";
212 TX_STATUS_ENTRY(SHORT_LIMIT);
213 TX_STATUS_ENTRY(LONG_LIMIT);
214 TX_STATUS_ENTRY(FIFO_UNDERRUN);
215 TX_STATUS_ENTRY(MGMNT_ABORT);
216 TX_STATUS_ENTRY(NEXT_FRAG);
217 TX_STATUS_ENTRY(LIFE_EXPIRE);
218 TX_STATUS_ENTRY(DEST_PS);
219 TX_STATUS_ENTRY(ABORTED);
220 TX_STATUS_ENTRY(BT_RETRY);
221 TX_STATUS_ENTRY(STA_INVALID);
222 TX_STATUS_ENTRY(FRAG_DROPPED);
223 TX_STATUS_ENTRY(TID_DISABLE);
224 TX_STATUS_ENTRY(FRAME_FLUSHED);
225 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
226 TX_STATUS_ENTRY(TX_LOCKED);
227 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
228 }
229
230 return "UNKNOWN";
231}
232#else
233static inline const char *iwl3945_get_tx_fail_reason(u32 status)
234{
235 return "";
236}
237#endif
238
239/*
240 * Get the previous IEEE rate from the rate-scale table.
241 * For A and B mode we need to override the previous
242 * value.
243 */
244int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
245{
246 int next_rate = iwl3945_get_prev_ieee_rate(rate);
247
248 switch (priv->band) {
249 case IEEE80211_BAND_5GHZ:
250 if (rate == IWL_RATE_12M_INDEX)
251 next_rate = IWL_RATE_9M_INDEX;
252 else if (rate == IWL_RATE_6M_INDEX)
253 next_rate = IWL_RATE_6M_INDEX;
254 break;
255 case IEEE80211_BAND_2GHZ:
256 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
257 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
258 if (rate == IWL_RATE_11M_INDEX)
259 next_rate = IWL_RATE_5M_INDEX;
260 }
261 break;
262
263 default:
264 break;
265 }
266
267 return next_rate;
268}
269
270
271/**
272 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
273 *
274 * When FW advances 'R' index, all entries between old and new 'R' index
275 * need to be reclaimed. As a result, free space becomes available. If there is
276 * enough free space (> low mark), wake the stack that feeds us.
277 */
278static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
279 int txq_id, int index)
280{
281 struct iwl_tx_queue *txq = &priv->txq[txq_id];
282 struct iwl_queue *q = &txq->q;
283 struct iwl_tx_info *tx_info;
284
285 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
286
287 for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
288 q->read_ptr != index;
289 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
290
291 tx_info = &txq->txb[txq->q.read_ptr];
292 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
293 tx_info->skb = NULL;
294 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
295 }
296
297 if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
298 (txq_id != IWL39_CMD_QUEUE_NUM) &&
299 priv->mac80211_registered)
300 iwl_legacy_wake_queue(priv, txq);
301}
302
303/**
304 * iwl3945_rx_reply_tx - Handle Tx response
305 */
306static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
307 struct iwl_rx_mem_buffer *rxb)
308{
309 struct iwl_rx_packet *pkt = rxb_addr(rxb);
310 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
311 int txq_id = SEQ_TO_QUEUE(sequence);
312 int index = SEQ_TO_INDEX(sequence);
313 struct iwl_tx_queue *txq = &priv->txq[txq_id];
314 struct ieee80211_tx_info *info;
315 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
316 u32 status = le32_to_cpu(tx_resp->status);
317 int rate_idx;
318 int fail;
319
320 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
321 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
322 "is out of range [0-%d] %d %d\n", txq_id,
323 index, txq->q.n_bd, txq->q.write_ptr,
324 txq->q.read_ptr);
325 return;
326 }
327
328 txq->time_stamp = jiffies;
329 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
330 ieee80211_tx_info_clear_status(info);
331
332 /* Fill the MRR chain with some info about on-chip retransmissions */
333 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
334 if (info->band == IEEE80211_BAND_5GHZ)
335 rate_idx -= IWL_FIRST_OFDM_RATE;
336
337 fail = tx_resp->failure_frame;
338
339 info->status.rates[0].idx = rate_idx;
340 info->status.rates[0].count = fail + 1; /* add final attempt */
341
342 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
343 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
344 IEEE80211_TX_STAT_ACK : 0;
345
346 IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
347 txq_id, iwl3945_get_tx_fail_reason(status), status,
348 tx_resp->rate, tx_resp->failure_frame);
349
350 IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
351 iwl3945_tx_queue_reclaim(priv, txq_id, index);
352
353 if (status & TX_ABORT_REQUIRED_MSK)
354 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
355}
356
357
358
359/*****************************************************************************
360 *
361 * Intel PRO/Wireless 3945ABG/BG Network Connection
362 *
363 * RX handler implementations
364 *
365 *****************************************************************************/
366#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
367static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
368 __le32 *stats)
369{
370 int i;
371 __le32 *prev_stats;
372 u32 *accum_stats;
373 u32 *delta, *max_delta;
374
375 prev_stats = (__le32 *)&priv->_3945.statistics;
376 accum_stats = (u32 *)&priv->_3945.accum_statistics;
377 delta = (u32 *)&priv->_3945.delta_statistics;
378 max_delta = (u32 *)&priv->_3945.max_delta;
379
380 for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
381 i += sizeof(__le32), stats++, prev_stats++, delta++,
382 max_delta++, accum_stats++) {
383 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
384 *delta = (le32_to_cpu(*stats) -
385 le32_to_cpu(*prev_stats));
386 *accum_stats += *delta;
387 if (*delta > *max_delta)
388 *max_delta = *delta;
389 }
390 }
391
392 /* reset accumulative statistics for "no-counter" type statistics */
393 priv->_3945.accum_statistics.general.temperature =
394 priv->_3945.statistics.general.temperature;
395 priv->_3945.accum_statistics.general.ttl_timestamp =
396 priv->_3945.statistics.general.ttl_timestamp;
397}
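/*
 * Note: the loop above only accumulates counters that grew since the
 * last notification; a counter the uCode has reset contributes nothing
 * for that interval, which is why the non-counter fields (temperature,
 * ttl_timestamp) are copied over explicitly afterwards.
 */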
398#endif
399
400void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
401 struct iwl_rx_mem_buffer *rxb)
402{
403 struct iwl_rx_packet *pkt = rxb_addr(rxb);
404
405 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
406 (int)sizeof(struct iwl3945_notif_statistics),
407 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
408#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
409 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
410#endif
411
412 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
413}
414
415void iwl3945_reply_statistics(struct iwl_priv *priv,
416 struct iwl_rx_mem_buffer *rxb)
417{
418 struct iwl_rx_packet *pkt = rxb_addr(rxb);
419 __le32 *flag = (__le32 *)&pkt->u.raw;
420
421 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
422#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
423 memset(&priv->_3945.accum_statistics, 0,
424 sizeof(struct iwl3945_notif_statistics));
425 memset(&priv->_3945.delta_statistics, 0,
426 sizeof(struct iwl3945_notif_statistics));
427 memset(&priv->_3945.max_delta, 0,
428 sizeof(struct iwl3945_notif_statistics));
429#endif
430 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
431 }
432 iwl3945_hw_rx_statistics(priv, rxb);
433}
434
435
436/******************************************************************************
437 *
438 * Misc. internal state and helper functions
439 *
440 ******************************************************************************/
441
442/* This is necessary only for a number of statistics, see the caller. */
443static int iwl3945_is_network_packet(struct iwl_priv *priv,
444 struct ieee80211_hdr *header)
445{
446 /* Filter incoming packets to determine if they are targeted toward
447 * this network, discarding packets coming from ourselves */
448 switch (priv->iw_mode) {
449 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
450 /* packets to our IBSS update information */
451 return !compare_ether_addr(header->addr3, priv->bssid);
452 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
453		/* packets from our AP (matching BSSID) update information */
454 return !compare_ether_addr(header->addr2, priv->bssid);
455 default:
456 return 1;
457 }
458}
459
460static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
461 struct iwl_rx_mem_buffer *rxb,
462 struct ieee80211_rx_status *stats)
463{
464 struct iwl_rx_packet *pkt = rxb_addr(rxb);
465 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
466 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
467 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
468 u16 len = le16_to_cpu(rx_hdr->len);
469 struct sk_buff *skb;
470 __le16 fc = hdr->frame_control;
471
472	/* Sanity check: drop the frame if its length would exceed the RX buffer */
473 if (unlikely(len + IWL39_RX_FRAME_SIZE >
474 PAGE_SIZE << priv->hw_params.rx_page_order)) {
475 IWL_DEBUG_DROP(priv, "Corruption detected!\n");
476 return;
477 }
478
479 /* We only process data packets if the interface is open */
480 if (unlikely(!priv->is_open)) {
481 IWL_DEBUG_DROP_LIMIT(priv,
482 "Dropping packet while interface is not open.\n");
483 return;
484 }
485
486 skb = dev_alloc_skb(128);
487 if (!skb) {
488 IWL_ERR(priv, "dev_alloc_skb failed\n");
489 return;
490 }
491
492 if (!iwl3945_mod_params.sw_crypto)
493 iwl_legacy_set_decrypted_flag(priv,
494 (struct ieee80211_hdr *)rxb_addr(rxb),
495 le32_to_cpu(rx_end->status), stats);
496
497 skb_add_rx_frag(skb, 0, rxb->page,
498 (void *)rx_hdr->payload - (void *)pkt, len);
499
500 iwl_legacy_update_stats(priv, false, fc, len);
501 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
502
503 ieee80211_rx(priv->hw, skb);
504 priv->alloc_rxb_page--;
505 rxb->page = NULL;
506}
507
508#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
509
510static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
511 struct iwl_rx_mem_buffer *rxb)
512{
513 struct ieee80211_hdr *header;
514 struct ieee80211_rx_status rx_status;
515 struct iwl_rx_packet *pkt = rxb_addr(rxb);
516 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
517 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
518 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
519 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
520 u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
521 u8 network_packet;
522
523 rx_status.flag = 0;
524 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
525 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
526 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
527 rx_status.freq =
528 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
529 rx_status.band);
530
531 rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
532 if (rx_status.band == IEEE80211_BAND_5GHZ)
533 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
534
535 rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
536 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
537
538 /* set the preamble flag if appropriate */
539 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
540 rx_status.flag |= RX_FLAG_SHORTPRE;
541
542 if ((unlikely(rx_stats->phy_count > 20))) {
543		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
544 rx_stats->phy_count);
545 return;
546 }
547
548 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
549 || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
550 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
551 return;
552 }
553
554
555
556 /* Convert 3945's rssi indicator to dBm */
557 rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
558
559 IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
560 rx_status.signal, rx_stats_sig_avg,
561 rx_stats_noise_diff);
562
563 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
564
565 network_packet = iwl3945_is_network_packet(priv, header);
566
567 IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
568 network_packet ? '*' : ' ',
569 le16_to_cpu(rx_hdr->channel),
570 rx_status.signal, rx_status.signal,
571 rx_status.rate_idx);
572
573 iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
574 header);
575
576 if (network_packet) {
577 priv->_3945.last_beacon_time =
578 le32_to_cpu(rx_end->beacon_timestamp);
579 priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
580 priv->_3945.last_rx_rssi = rx_status.signal;
581 }
582
583 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
584}
585
586int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
587 struct iwl_tx_queue *txq,
588 dma_addr_t addr, u16 len, u8 reset, u8 pad)
589{
590 int count;
591 struct iwl_queue *q;
592 struct iwl3945_tfd *tfd, *tfd_tmp;
593
594 q = &txq->q;
595 tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
596 tfd = &tfd_tmp[q->write_ptr];
597
598 if (reset)
599 memset(tfd, 0, sizeof(*tfd));
600
601 count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
602
603 if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
604 IWL_ERR(priv, "Error can not send more than %d chunks\n",
605 NUM_TFD_CHUNKS);
606 return -EINVAL;
607 }
608
609 tfd->tbs[count].addr = cpu_to_le32(addr);
610 tfd->tbs[count].len = cpu_to_le32(len);
611
612 count++;
613
614 tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
615 TFD_CTL_PAD_SET(pad));
616
617 return 0;
618}
619
620/**
621 * iwl3945_hw_txq_free_tfd - Free the one TFD at index [txq->q.read_ptr]
622 *
623 * Does NOT advance any indexes
624 */
625void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
626{
627 struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
628 int index = txq->q.read_ptr;
629 struct iwl3945_tfd *tfd = &tfd_tmp[index];
630 struct pci_dev *dev = priv->pci_dev;
631 int i;
632 int counter;
633
634 /* sanity check */
635 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
636 if (counter > NUM_TFD_CHUNKS) {
637 IWL_ERR(priv, "Too many chunks: %i\n", counter);
638		/* @todo issue fatal error, it is quite a serious situation */
639 return;
640 }
641
642 /* Unmap tx_cmd */
643 if (counter)
644 pci_unmap_single(dev,
645 dma_unmap_addr(&txq->meta[index], mapping),
646 dma_unmap_len(&txq->meta[index], len),
647 PCI_DMA_TODEVICE);
648
649 /* unmap chunks if any */
650
651 for (i = 1; i < counter; i++)
652 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
653 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
654
655 /* free SKB */
656 if (txq->txb) {
657 struct sk_buff *skb;
658
659 skb = txq->txb[txq->q.read_ptr].skb;
660
661 /* can be called from irqs-disabled context */
662 if (skb) {
663 dev_kfree_skb_any(skb);
664 txq->txb[txq->q.read_ptr].skb = NULL;
665 }
666 }
667}
668
669/**
670 * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
671 *
672 */
673void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
674 struct iwl_device_cmd *cmd,
675 struct ieee80211_tx_info *info,
676 struct ieee80211_hdr *hdr,
677 int sta_id, int tx_id)
678{
679 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
680	u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945 - 1);
681 u16 rate_mask;
682 int rate;
683 u8 rts_retry_limit;
684 u8 data_retry_limit;
685 __le32 tx_flags;
686 __le16 fc = hdr->frame_control;
687 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
688
689 rate = iwl3945_rates[rate_index].plcp;
690 tx_flags = tx_cmd->tx_flags;
691
692 /* We need to figure out how to get the sta->supp_rates while
693 * in this running context */
694 rate_mask = IWL_RATES_MASK_3945;
695
696 /* Set retry limit on DATA packets and Probe Responses*/
697 if (ieee80211_is_probe_resp(fc))
698 data_retry_limit = 3;
699 else
700 data_retry_limit = IWL_DEFAULT_TX_RETRY;
701 tx_cmd->data_retry_limit = data_retry_limit;
702
703 if (tx_id >= IWL39_CMD_QUEUE_NUM)
704 rts_retry_limit = 3;
705 else
706 rts_retry_limit = 7;
707
708 if (data_retry_limit < rts_retry_limit)
709 rts_retry_limit = data_retry_limit;
710 tx_cmd->rts_retry_limit = rts_retry_limit;
711
712 tx_cmd->rate = rate;
713 tx_cmd->tx_flags = tx_flags;
714
715 /* OFDM */
716 tx_cmd->supp_rates[0] =
717 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
718
719 /* CCK */
720 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
721
722 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
723 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
724 tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
725 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
726}
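/*
 * Illustrative split of the supported-rates mask above: with the 3945
 * rate table ordered CCK 1/2/5.5/11 first and OFDM 6..54 Mbps starting
 * at IWL_FIRST_OFDM_RATE, a full IWL_RATES_MASK_3945 (all 12 rates)
 * yields supp_rates[1] (CCK) == 0x0F and supp_rates[0] (OFDM) == 0xFF.
 */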
727
728static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
729{
730 unsigned long flags_spin;
731 struct iwl_station_entry *station;
732
733 if (sta_id == IWL_INVALID_STATION)
734 return IWL_INVALID_STATION;
735
736 spin_lock_irqsave(&priv->sta_lock, flags_spin);
737 station = &priv->stations[sta_id];
738
739 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
740 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
741 station->sta.mode = STA_CONTROL_MODIFY_MSK;
742 iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
743 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
744
745 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
746 sta_id, tx_rate);
747 return sta_id;
748}
749
750static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
751{
752/*
753 * (for documentation purposes)
754 * to set power to V_AUX, do
755
756 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
757 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
758 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
759 ~APMG_PS_CTRL_MSK_PWR_SRC);
760
761 iwl_poll_bit(priv, CSR_GPIO_IN,
762 CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
763 CSR_GPIO_IN_BIT_AUX_POWER, 5000);
764 }
765 */
766
767 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
768 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
769 ~APMG_PS_CTRL_MSK_PWR_SRC);
770
771 iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
772 CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
773}
774
775static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
776{
777 iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
778 iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
779 rxq->rb_stts_dma);
780 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
781 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
782 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
783 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
784 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
785 FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
786 (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
787 FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
788 (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
789 FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
790
791 /* fake read to flush all prev I/O */
792 iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
793
794 return 0;
795}
796
797static int iwl3945_tx_reset(struct iwl_priv *priv)
798{
799
800 /* bypass mode */
801 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
802
803 /* RA 0 is active */
804 iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
805
806 /* all 6 fifo are active */
807 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
808
809 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
810 iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
811 iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
812 iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
813
814 iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
815 priv->_3945.shared_phys);
816
817 iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
818 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
819 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
820 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
821 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
822 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
823 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
824 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
825
826
827 return 0;
828}
829
830/**
831 * iwl3945_txq_ctx_reset - Reset TX queue context
832 *
833 * Destroys all DMA structures and initializes them again
834 */
835static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
836{
837 int rc;
838 int txq_id, slots_num;
839
840 iwl3945_hw_txq_ctx_free(priv);
841
842 /* allocate tx queue structure */
843 rc = iwl_legacy_alloc_txq_mem(priv);
844 if (rc)
845 return rc;
846
847 /* Tx CMD queue */
848 rc = iwl3945_tx_reset(priv);
849 if (rc)
850 goto error;
851
852 /* Tx queue(s) */
853 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
854 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
855 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
856 rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
857 slots_num, txq_id);
858 if (rc) {
859 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
860 goto error;
861 }
862 }
863
864 return rc;
865
866 error:
867 iwl3945_hw_txq_ctx_free(priv);
868 return rc;
869}
870
871
872/*
873 * Start up 3945's basic functionality after it has been reset
874 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
875 * NOTE: This does not load uCode nor start the embedded processor
876 */
877static int iwl3945_apm_init(struct iwl_priv *priv)
878{
879 int ret = iwl_legacy_apm_init(priv);
880
881 /* Clear APMG (NIC's internal power management) interrupts */
882 iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
883 iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
884
885 /* Reset radio chip */
886 iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
887 APMG_PS_CTRL_VAL_RESET_REQ);
888 udelay(5);
889 iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
890 APMG_PS_CTRL_VAL_RESET_REQ);
891
892 return ret;
893}
894
895static void iwl3945_nic_config(struct iwl_priv *priv)
896{
897 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
898 unsigned long flags;
899 u8 rev_id = priv->pci_dev->revision;
900
901 spin_lock_irqsave(&priv->lock, flags);
902
903 /* Determine HW type */
904 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
905
906 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
907 IWL_DEBUG_INFO(priv, "RTP type\n");
908 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
909 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
910 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
911 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
912 } else {
913 IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
914 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
915 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
916 }
917
918 if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
919 IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
920 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
921 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
922 } else
923 IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
924
925 if ((eeprom->board_revision & 0xF0) == 0xD0) {
926 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
927 eeprom->board_revision);
928 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
929 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
930 } else {
931 IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
932 eeprom->board_revision);
933 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
934 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
935 }
936
937 if (eeprom->almgor_m_version <= 1) {
938 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
939 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
940 IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
941 eeprom->almgor_m_version);
942 } else {
943 IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
944 eeprom->almgor_m_version);
945 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
946 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
947 }
948 spin_unlock_irqrestore(&priv->lock, flags);
949
950 if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
951 IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
952
953 if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
954 IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
955}
956
957int iwl3945_hw_nic_init(struct iwl_priv *priv)
958{
959 int rc;
960 unsigned long flags;
961 struct iwl_rx_queue *rxq = &priv->rxq;
962
963 spin_lock_irqsave(&priv->lock, flags);
964 priv->cfg->ops->lib->apm_ops.init(priv);
965 spin_unlock_irqrestore(&priv->lock, flags);
966
967 iwl3945_set_pwr_vmain(priv);
968
969 priv->cfg->ops->lib->apm_ops.config(priv);
970
971 /* Allocate the RX queue, or reset if it is already allocated */
972 if (!rxq->bd) {
973 rc = iwl_legacy_rx_queue_alloc(priv);
974 if (rc) {
975 IWL_ERR(priv, "Unable to initialize Rx queue\n");
976 return -ENOMEM;
977 }
978 } else
979 iwl3945_rx_queue_reset(priv, rxq);
980
981 iwl3945_rx_replenish(priv);
982
983 iwl3945_rx_init(priv, rxq);
984
985
986 /* Look at using this instead:
987 rxq->need_update = 1;
988 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
989 */
990
991 iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
992
993 rc = iwl3945_txq_ctx_reset(priv);
994 if (rc)
995 return rc;
996
997 set_bit(STATUS_INIT, &priv->status);
998
999 return 0;
1000}
1001
1002/**
1003 * iwl3945_hw_txq_ctx_free - Free TXQ Context
1004 *
1005 * Destroy all TX DMA queues and structures
1006 */
1007void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1008{
1009 int txq_id;
1010
1011 /* Tx queues */
1012 if (priv->txq)
1013 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1014 txq_id++)
1015 if (txq_id == IWL39_CMD_QUEUE_NUM)
1016 iwl_legacy_cmd_queue_free(priv);
1017 else
1018 iwl_legacy_tx_queue_free(priv, txq_id);
1019
1020 /* free tx queue structure */
1021 iwl_legacy_txq_mem(priv);
1022}
1023
1024void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1025{
1026 int txq_id;
1027
1028 /* stop SCD */
1029 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
1030 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
1031
1032 /* reset TFD queues */
1033 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
1034 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
1035 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
1036 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1037 1000);
1038 }
1039
1040 iwl3945_hw_txq_ctx_free(priv);
1041}
1042
1043/**
1044 * iwl3945_hw_reg_adjust_power_by_temp
1045 * return index delta into power gain settings table
1046 */
1047static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1048{
1049 return (new_reading - old_reading) * (-11) / 100;
1050}
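/*
 * Example (illustration only): a temperature rise of 100 units between
 * readings gives 100 * (-11) / 100 = -11, i.e. an index shift of 11
 * entries (about 5.5 dB at the gain table's 1/2 dB per step) to
 * compensate for the change.
 */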
1051
1052/**
1053 * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
1054 */
1055static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
1056{
1057 return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
1058}
1059
1060int iwl3945_hw_get_temperature(struct iwl_priv *priv)
1061{
1062 return iwl_read32(priv, CSR_UCODE_DRV_GP2);
1063}
1064
1065/**
1066 * iwl3945_hw_reg_txpower_get_temperature
1067 * get the current temperature by reading from NIC
1068 */
1069static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
1070{
1071 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1072 int temperature;
1073
1074 temperature = iwl3945_hw_get_temperature(priv);
1075
1076 /* driver's okay range is -260 to +25.
1077 * human readable okay range is 0 to +285 */
1078 IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
1079
1080 /* handle insane temp reading */
1081 if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
1082 IWL_ERR(priv, "Error bad temperature value %d\n", temperature);
1083
1084		/* if the last known temperature was very hot,
1085		 * substitute the 3rd band/group's temp measured at factory */
1086 if (priv->last_temperature > 100)
1087 temperature = eeprom->groups[2].temperature;
1088 else /* else use most recent "sane" value from driver */
1089 temperature = priv->last_temperature;
1090 }
1091
1092 return temperature; /* raw, not "human readable" */
1093}
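/*
 * The "human readable" value logged above is the raw reading plus
 * IWL_TEMP_CONVERT; the ranges noted in the comment (-260..+25 raw vs
 * 0..+285 readable) imply an offset of 260.
 */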
1094
1095/* Adjust Txpower only if temperature variance is greater than threshold.
1096 *
1097 * This 6-degree threshold is lower than the 9 degrees used by older versions */
1098#define IWL_TEMPERATURE_LIMIT_TIMER 6
1099
1100/**
1101 * iwl3945_is_temp_calib_needed - determines if new calibration is needed
1102 *
1103 * records new temperature in tx_mgr->temperature.
1104 * replaces tx_mgr->last_temperature *only* if calib needed
1105 * (assumes caller will actually do the calibration!). */
1106static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
1107{
1108 int temp_diff;
1109
1110 priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
1111 temp_diff = priv->temperature - priv->last_temperature;
1112
1113 /* get absolute value */
1114 if (temp_diff < 0) {
1115 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
1116 temp_diff = -temp_diff;
1117 } else if (temp_diff == 0)
1118 IWL_DEBUG_POWER(priv, "Same temp,\n");
1119 else
1120 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
1121
1122 /* if we don't need calibration, *don't* update last_temperature */
1123 if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
1124 IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
1125 return 0;
1126 }
1127
1128 IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
1129
1130 /* assume that caller will actually do calib ...
1131 * update the "last temperature" value */
1132 priv->last_temperature = priv->temperature;
1133 return 1;
1134}
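/*
 * Example: with IWL_TEMPERATURE_LIMIT_TIMER at 6, a swing of 5 units
 * since the last calibration only logs the delta and returns 0, while a
 * swing of 6 or more returns 1 and records the new reading in
 * last_temperature for the caller's calibration pass.
 */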
1135
1136#define IWL_MAX_GAIN_ENTRIES 78
1137#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
1138#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
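/*
 * The two constants above are the same 5 dB offset expressed two ways:
 * with gain-table steps of 1/2 dB, a -5 dB power difference corresponds
 * to +10 index steps toward the lower-power end of the table.
 */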
1139
1140/* radio and DSP power table, each step is 1/2 dB.
1141 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
1142static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
1143 {
1144 {251, 127}, /* 2.4 GHz, highest power */
1145 {251, 127},
1146 {251, 127},
1147 {251, 127},
1148 {251, 125},
1149 {251, 110},
1150 {251, 105},
1151 {251, 98},
1152 {187, 125},
1153 {187, 115},
1154 {187, 108},
1155 {187, 99},
1156 {243, 119},
1157 {243, 111},
1158 {243, 105},
1159 {243, 97},
1160 {243, 92},
1161 {211, 106},
1162 {211, 100},
1163 {179, 120},
1164 {179, 113},
1165 {179, 107},
1166 {147, 125},
1167 {147, 119},
1168 {147, 112},
1169 {147, 106},
1170 {147, 101},
1171 {147, 97},
1172 {147, 91},
1173 {115, 107},
1174 {235, 121},
1175 {235, 115},
1176 {235, 109},
1177 {203, 127},
1178 {203, 121},
1179 {203, 115},
1180 {203, 108},
1181 {203, 102},
1182 {203, 96},
1183 {203, 92},
1184 {171, 110},
1185 {171, 104},
1186 {171, 98},
1187 {139, 116},
1188 {227, 125},
1189 {227, 119},
1190 {227, 113},
1191 {227, 107},
1192 {227, 101},
1193 {227, 96},
1194 {195, 113},
1195 {195, 106},
1196 {195, 102},
1197 {195, 95},
1198 {163, 113},
1199 {163, 106},
1200 {163, 102},
1201 {163, 95},
1202 {131, 113},
1203 {131, 106},
1204 {131, 102},
1205 {131, 95},
1206 {99, 113},
1207 {99, 106},
1208 {99, 102},
1209 {99, 95},
1210 {67, 113},
1211 {67, 106},
1212 {67, 102},
1213 {67, 95},
1214 {35, 113},
1215 {35, 106},
1216 {35, 102},
1217 {35, 95},
1218 {3, 113},
1219 {3, 106},
1220 {3, 102},
1221 {3, 95} }, /* 2.4 GHz, lowest power */
1222 {
1223 {251, 127}, /* 5.x GHz, highest power */
1224 {251, 120},
1225 {251, 114},
1226 {219, 119},
1227 {219, 101},
1228 {187, 113},
1229 {187, 102},
1230 {155, 114},
1231 {155, 103},
1232 {123, 117},
1233 {123, 107},
1234 {123, 99},
1235 {123, 92},
1236 {91, 108},
1237 {59, 125},
1238 {59, 118},
1239 {59, 109},
1240 {59, 102},
1241 {59, 96},
1242 {59, 90},
1243 {27, 104},
1244 {27, 98},
1245 {27, 92},
1246 {115, 118},
1247 {115, 111},
1248 {115, 104},
1249 {83, 126},
1250 {83, 121},
1251 {83, 113},
1252 {83, 105},
1253 {83, 99},
1254 {51, 118},
1255 {51, 111},
1256 {51, 104},
1257 {51, 98},
1258 {19, 116},
1259 {19, 109},
1260 {19, 102},
1261 {19, 98},
1262 {19, 93},
1263 {171, 113},
1264 {171, 107},
1265 {171, 99},
1266 {139, 120},
1267 {139, 113},
1268 {139, 107},
1269 {139, 99},
1270 {107, 120},
1271 {107, 113},
1272 {107, 107},
1273 {107, 99},
1274 {75, 120},
1275 {75, 113},
1276 {75, 107},
1277 {75, 99},
1278 {43, 120},
1279 {43, 113},
1280 {43, 107},
1281 {43, 99},
1282 {11, 120},
1283 {11, 113},
1284 {11, 107},
1285 {11, 99},
1286 {131, 107},
1287 {131, 99},
1288 {99, 120},
1289 {99, 113},
1290 {99, 107},
1291 {99, 99},
1292 {67, 120},
1293 {67, 113},
1294 {67, 107},
1295 {67, 99},
1296 {35, 120},
1297 {35, 113},
1298 {35, 107},
1299 {35, 99},
1300 {3, 120} } /* 5.x GHz, lowest power */
1301};
1302
1303static inline u8 iwl3945_hw_reg_fix_power_index(int index)
1304{
1305 if (index < 0)
1306 return 0;
1307 if (index >= IWL_MAX_GAIN_ENTRIES)
1308 return IWL_MAX_GAIN_ENTRIES - 1;
1309 return (u8) index;
1310}
1311
1312/* Kick off thermal recalibration check every 60 seconds */
1313#define REG_RECALIB_PERIOD (60)
1314
1315/**
1316 * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
1317 *
1318 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1319 * or 6 Mbit (OFDM) rates.
1320 */
1321static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
1322 s32 rate_index, const s8 *clip_pwrs,
1323 struct iwl_channel_info *ch_info,
1324 int band_index)
1325{
1326 struct iwl3945_scan_power_info *scan_power_info;
1327 s8 power;
1328 u8 power_index;
1329
1330 scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
1331
1332 /* use this channel group's 6Mbit clipping/saturation pwr,
1333 * but cap at regulatory scan power restriction (set during init
1334 * based on eeprom channel data) for this channel. */
1335 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
1336
1337 power = min(power, priv->tx_power_user_lmt);
1338 scan_power_info->requested_power = power;
1339
1340 /* find difference between new scan *power* and current "normal"
1341 * Tx *power* for 6Mb. Use this difference (x2) to adjust the
1342 * current "normal" temperature-compensated Tx power *index* for
1343 * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
1344 * *index*. */
1345 power_index = ch_info->power_info[rate_index].power_table_index
1346 - (power - ch_info->power_info
1347 [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
1348
1349 /* store reference index that we use when adjusting *all* scan
1350 * powers. So we can accommodate user (all channel) or spectrum
1351 * management (single channel) power changes "between" temperature
1352 * feedback compensation procedures.
1353 * don't force fit this reference index into gain table; it may be a
1354 * negative number. This will help avoid errors when we're at
1355 * the lower bounds (highest gains, for warmest temperatures)
1356 * of the table. */
1357
1358 /* don't exceed table bounds for "real" setting */
1359 power_index = iwl3945_hw_reg_fix_power_index(power_index);
1360
1361 scan_power_info->power_table_index = power_index;
1362 scan_power_info->tpc.tx_gain =
1363 power_gain_table[band_index][power_index].tx_gain;
1364 scan_power_info->tpc.dsp_atten =
1365 power_gain_table[band_index][power_index].dsp_atten;
1366}
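/*
 * Example of the scan-power index math above (numbers are hypothetical):
 * if this rate's temperature-compensated index is 30, the 6M requested
 * power is 12 dBm and the capped scan power is 15 dBm, then
 * power_index = 30 - (15 - 12) * 2 = 24, i.e. six 1/2-dB steps toward
 * the higher-power end of the gain table.
 */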
1367
1368/**
1369 * iwl3945_send_tx_power - fill in Tx Power command with gain settings
1370 *
1371 * Configures power settings for all rates for the current channel,
1372 * using values from channel info struct, and send to NIC
1373 */
1374static int iwl3945_send_tx_power(struct iwl_priv *priv)
1375{
1376 int rate_idx, i;
1377 const struct iwl_channel_info *ch_info = NULL;
1378 struct iwl3945_txpowertable_cmd txpower = {
1379 .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
1380 };
1381 u16 chan;
1382
1383 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1384 "TX Power requested while scanning!\n"))
1385 return -EAGAIN;
1386
1387 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1388
1389 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1390 ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
1391 if (!ch_info) {
1392 IWL_ERR(priv,
1393 "Failed to get channel info for channel %d [%d]\n",
1394 chan, priv->band);
1395 return -EINVAL;
1396 }
1397
1398 if (!iwl_legacy_is_channel_valid(ch_info)) {
1399 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
1400 "non-Tx channel.\n");
1401 return 0;
1402 }
1403
1404 /* fill cmd with power settings for all rates for current channel */
1405 /* Fill OFDM rate */
1406 for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
1407 rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
1408
1409 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1410 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
1411
1412 IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1413 le16_to_cpu(txpower.channel),
1414 txpower.band,
1415 txpower.power[i].tpc.tx_gain,
1416 txpower.power[i].tpc.dsp_atten,
1417 txpower.power[i].rate);
1418 }
1419 /* Fill CCK rates */
1420 for (rate_idx = IWL_FIRST_CCK_RATE;
1421 rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
1422 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1423 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
1424
1425 IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1426 le16_to_cpu(txpower.channel),
1427 txpower.band,
1428 txpower.power[i].tpc.tx_gain,
1429 txpower.power[i].tpc.dsp_atten,
1430 txpower.power[i].rate);
1431 }
1432
1433 return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
1434 sizeof(struct iwl3945_txpowertable_cmd),
1435 &txpower);
1436
1437}
1438
1439/**
1440 * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
1441 * @ch_info: Channel to update. Uses power_info.requested_power.
1442 *
1443 * Replace requested_power and base_power_index ch_info fields for
1444 * one channel.
1445 *
1446 * Called if user or spectrum management changes power preferences.
1447 * Takes into account h/w and modulation limitations (clip power).
1448 *
1449 * This does *not* send anything to NIC, just sets up ch_info for one channel.
1450 *
1451 * NOTE: iwl3945_hw_reg_comp_txpower_temp() *must* be run after this to
1452 * properly fill out the scan powers, and actual h/w gain settings,
1453 * and send changes to NIC
1454 */
1455static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
1456 struct iwl_channel_info *ch_info)
1457{
1458 struct iwl3945_channel_power_info *power_info;
1459 int power_changed = 0;
1460 int i;
1461 const s8 *clip_pwrs;
1462 int power;
1463
1464 /* Get this chnlgrp's rate-to-max/clip-powers table */
1465 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1466
1467 /* Get this channel's rate-to-current-power settings table */
1468 power_info = ch_info->power_info;
1469
1470 /* update OFDM Txpower settings */
1471 for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
1472 i++, ++power_info) {
1473 int delta_idx;
1474
1475 /* limit new power to be no more than h/w capability */
1476 power = min(ch_info->curr_txpow, clip_pwrs[i]);
1477 if (power == power_info->requested_power)
1478 continue;
1479
1480 /* find difference between old and new requested powers,
1481 * update base (non-temp-compensated) power index */
1482 delta_idx = (power - power_info->requested_power) * 2;
1483 power_info->base_power_index -= delta_idx;
1484
1485 /* save new requested power value */
1486 power_info->requested_power = power;
1487
1488 power_changed = 1;
1489 }
1490
1491 /* update CCK Txpower settings, based on OFDM 12M setting ...
1492 * ... all CCK power settings for a given channel are the *same*. */
1493 if (power_changed) {
1494 power =
1495 ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
1496 requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
1497
1498 /* do all CCK rates' iwl3945_channel_power_info structures */
1499 for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
1500 power_info->requested_power = power;
1501 power_info->base_power_index =
1502 ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
1503 base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
1504 ++power_info;
1505 }
1506 }
1507
1508 return 0;
1509}
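/*
 * Example of the base-index adjustment above (numbers are hypothetical):
 * lowering a rate's requested power from 15 dBm to 12 dBm gives
 * delta_idx = (12 - 15) * 2 = -6, so base_power_index grows by 6,
 * i.e. six 1/2-dB steps toward the lower-power end of the gain table.
 */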
1510
1511/**
1512 * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1513 *
1514 * NOTE: Returned power limit may be less (but not more) than requested,
1515 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1516 * (no consideration for h/w clipping limitations).
1517 */
1518static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
1519{
1520 s8 max_power;
1521
1522#if 0
1523 /* if we're using TGd limits, use lower of TGd or EEPROM */
1524 if (ch_info->tgd_data.max_power != 0)
1525 max_power = min(ch_info->tgd_data.max_power,
1526 ch_info->eeprom.max_power_avg);
1527
1528 /* else just use EEPROM limits */
1529 else
1530#endif
1531 max_power = ch_info->eeprom.max_power_avg;
1532
1533 return min(max_power, ch_info->max_power_avg);
1534}
1535
1536/**
1537 * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
1538 *
1539 * Compensate txpower settings of *all* channels for temperature.
1540 * This only accounts for the difference between current temperature
1541 * and the factory calibration temperatures, and bases the new settings
1542 * on the channel's base_power_index.
1543 *
1544 * If RxOn is "associated", this sends the new Txpower to NIC!
1545 */
1546static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1547{
1548 struct iwl_channel_info *ch_info = NULL;
1549 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1550 int delta_index;
1551 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1552 u8 a_band;
1553 u8 rate_index;
1554 u8 scan_tbl_index;
1555 u8 i;
1556 int ref_temp;
1557 int temperature = priv->temperature;
1558
1559 if (priv->disable_tx_power_cal ||
1560 test_bit(STATUS_SCANNING, &priv->status)) {
1561 /* do not perform tx power calibration */
1562 return 0;
1563 }
1564 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1565 for (i = 0; i < priv->channel_count; i++) {
1566 ch_info = &priv->channel_info[i];
1567 a_band = iwl_legacy_is_channel_a_band(ch_info);
1568
1569 /* Get this chnlgrp's factory calibration temperature */
1570 ref_temp = (s16)eeprom->groups[ch_info->group_index].
1571 temperature;
1572
1573 /* get power index adjustment based on current and factory
1574 * temps */
1575 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
1576 ref_temp);
1577
1578 /* set tx power value for all rates, OFDM and CCK */
1579 for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
1580 rate_index++) {
1581 int power_idx =
1582 ch_info->power_info[rate_index].base_power_index;
1583
1584 /* temperature compensate */
1585 power_idx += delta_index;
1586
1587 /* stay within table range */
1588 power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
1589 ch_info->power_info[rate_index].
1590 power_table_index = (u8) power_idx;
1591 ch_info->power_info[rate_index].tpc =
1592 power_gain_table[a_band][power_idx];
1593 }
1594
1595 /* Get this chnlgrp's rate-to-max/clip-powers table */
1596 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1597
1598 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1599 for (scan_tbl_index = 0;
1600 scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
1601 s32 actual_index = (scan_tbl_index == 0) ?
1602 IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
1603 iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
1604 actual_index, clip_pwrs,
1605 ch_info, a_band);
1606 }
1607 }
1608
1609 /* send Txpower command for current channel to ucode */
1610 return priv->cfg->ops->lib->send_tx_power(priv);
1611}
1612
1613int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1614{
1615 struct iwl_channel_info *ch_info;
1616 s8 max_power;
1617 u8 a_band;
1618 u8 i;
1619
1620 if (priv->tx_power_user_lmt == power) {
1621 IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
1622 "limit: %ddBm.\n", power);
1623 return 0;
1624 }
1625
1626 IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
1627 priv->tx_power_user_lmt = power;
1628
1629 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1630
1631 for (i = 0; i < priv->channel_count; i++) {
1632 ch_info = &priv->channel_info[i];
1633 a_band = iwl_legacy_is_channel_a_band(ch_info);
1634
1635 /* find minimum power of all user and regulatory constraints
1636 * (does not consider h/w clipping limitations) */
1637 max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
1638 max_power = min(power, max_power);
1639 if (max_power != ch_info->curr_txpow) {
1640 ch_info->curr_txpow = max_power;
1641
1642 /* this considers the h/w clipping limitations */
1643 iwl3945_hw_reg_set_new_power(priv, ch_info);
1644 }
1645 }
1646
1647 /* update txpower settings for all channels,
1648 * send to NIC if associated. */
1649 iwl3945_is_temp_calib_needed(priv);
1650 iwl3945_hw_reg_comp_txpower_temp(priv);
1651
1652 return 0;
1653}
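/*
 * Example (numbers are hypothetical): if the user requests 20 dBm but a
 * channel's regulatory limit from iwl3945_hw_reg_get_ch_txpower_limit()
 * is 17 dBm, that channel's curr_txpow becomes 17; the per-rate clip
 * powers are then applied on top of it by iwl3945_hw_reg_set_new_power().
 */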
1654
1655static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1656 struct iwl_rxon_context *ctx)
1657{
1658 int rc = 0;
1659 struct iwl_rx_packet *pkt;
1660 struct iwl3945_rxon_assoc_cmd rxon_assoc;
1661 struct iwl_host_cmd cmd = {
1662 .id = REPLY_RXON_ASSOC,
1663 .len = sizeof(rxon_assoc),
1664 .flags = CMD_WANT_SKB,
1665 .data = &rxon_assoc,
1666 };
1667 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1668 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1669
1670 if ((rxon1->flags == rxon2->flags) &&
1671 (rxon1->filter_flags == rxon2->filter_flags) &&
1672 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1673 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1674 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1675 return 0;
1676 }
1677
1678 rxon_assoc.flags = ctx->staging.flags;
1679 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1680 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1681 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1682 rxon_assoc.reserved = 0;
1683
1684 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
1685 if (rc)
1686 return rc;
1687
1688 pkt = (struct iwl_rx_packet *)cmd.reply_page;
1689 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
1690 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
1691 rc = -EIO;
1692 }
1693
1694 iwl_legacy_free_pages(priv, cmd.reply_page);
1695
1696 return rc;
1697}
1698
1699/**
1700 * iwl3945_commit_rxon - commit staging_rxon to hardware
1701 *
1702 * The RXON command in staging_rxon is committed to the hardware and
1703 * the active_rxon structure is updated with the new data. This
1704 * function correctly transitions out of the RXON_ASSOC_MSK state if
1705 * a HW tune is required based on the RXON structure changes.
1706 */
1707int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1708{
1709 /* cast away the const for active_rxon in this function */
1710 struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
1711 struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
1712 int rc = 0;
1713 bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
1714
1715 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1716 return -EINVAL;
1717
1718 if (!iwl_legacy_is_alive(priv))
1719 return -1;
1720
1721 /* always get timestamp with Rx frame */
1722 staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
1723
1724 /* select antenna */
1725 staging_rxon->flags &=
1726 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1727 staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
1728
1729 rc = iwl_legacy_check_rxon_cmd(priv, ctx);
1730 if (rc) {
1731 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1732 return -EINVAL;
1733 }
1734
1735 /* If we don't need to send a full RXON, we can use
1736 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
1737 * and other flags for the current radio configuration. */
1738 if (!iwl_legacy_full_rxon_required(priv,
1739 &priv->contexts[IWL_RXON_CTX_BSS])) {
1740 rc = iwl_legacy_send_rxon_assoc(priv,
1741 &priv->contexts[IWL_RXON_CTX_BSS]);
1742 if (rc) {
1743 IWL_ERR(priv, "Error setting RXON_ASSOC "
1744 "configuration (%d).\n", rc);
1745 return rc;
1746 }
1747
1748 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1749 /*
1750 * We do not commit tx power settings while channel changing,
1751 * do it now if tx power changed.
1752 */
1753 iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
1754 return 0;
1755 }
1756
1757 /* If we are currently associated and the new config requires
1758 * an RXON_ASSOC and the new config wants the associated mask enabled,
1759	 * we must clear the associated bit from the active configuration
1760 * before we apply the new config */
1761 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
1762 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1763 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1764
1765 /*
1766 * reserved4 and 5 could have been filled by the iwlcore code.
1767 * Let's clear them before pushing to the 3945.
1768 */
1769 active_rxon->reserved4 = 0;
1770 active_rxon->reserved5 = 0;
1771 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1772 sizeof(struct iwl3945_rxon_cmd),
1773 &priv->contexts[IWL_RXON_CTX_BSS].active);
1774
1775 /* If the mask clearing failed then we set
1776 * active_rxon back to what it was previously */
1777 if (rc) {
1778 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1779 IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
1780 "configuration (%d).\n", rc);
1781 return rc;
1782 }
1783 iwl_legacy_clear_ucode_stations(priv,
1784 &priv->contexts[IWL_RXON_CTX_BSS]);
1785 iwl_legacy_restore_stations(priv,
1786 &priv->contexts[IWL_RXON_CTX_BSS]);
1787 }
1788
1789 IWL_DEBUG_INFO(priv, "Sending RXON\n"
1790 "* with%s RXON_FILTER_ASSOC_MSK\n"
1791 "* channel = %d\n"
1792 "* bssid = %pM\n",
1793 (new_assoc ? "" : "out"),
1794 le16_to_cpu(staging_rxon->channel),
1795 staging_rxon->bssid_addr);
1796
1797 /*
1798 * reserved4 and 5 could have been filled by the iwlcore code.
1799 * Let's clear them before pushing to the 3945.
1800 */
1801 staging_rxon->reserved4 = 0;
1802 staging_rxon->reserved5 = 0;
1803
1804 iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
1805
1806 /* Apply the new configuration */
1807 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1808 sizeof(struct iwl3945_rxon_cmd),
1809 staging_rxon);
1810 if (rc) {
1811 IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
1812 return rc;
1813 }
1814
1815 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1816
1817 if (!new_assoc) {
1818 iwl_legacy_clear_ucode_stations(priv,
1819 &priv->contexts[IWL_RXON_CTX_BSS]);
1820 iwl_legacy_restore_stations(priv,
1821 &priv->contexts[IWL_RXON_CTX_BSS]);
1822 }
1823
1824 /* If we issue a new RXON command which required a tune then we must
1825 * send a new TXPOWER command or we won't be able to Tx any frames */
1826 rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1827 if (rc) {
1828 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
1829 return rc;
1830 }
1831
1832 /* Init the hardware's rate fallback order based on the band */
1833 rc = iwl3945_init_hw_rate_table(priv);
1834 if (rc) {
1835 IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
1836 return -EIO;
1837 }
1838
1839 return 0;
1840}
1841
1842/**
1843 * iwl3945_reg_txpower_periodic - called when time to check our temperature.
1844 *
1845 * -- reset periodic timer
1846 * -- see if temp has changed enough to warrant re-calibration ... if so:
1847 * -- correct coeffs for temp (can reset temp timer)
1848 * -- save this temp as "last",
1849 * -- send new set of gain settings to NIC
1850 * NOTE: This should continue working, even when we're not associated,
1851 * so we can keep our internal table of scan powers current. */
1852void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1853{
1854 /* This will kick in the "brute force"
1855 * iwl3945_hw_reg_comp_txpower_temp() below */
1856 if (!iwl3945_is_temp_calib_needed(priv))
1857 goto reschedule;
1858
1859 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1860 * This is based *only* on current temperature,
1861 * ignoring any previous power measurements */
1862 iwl3945_hw_reg_comp_txpower_temp(priv);
1863
1864 reschedule:
1865 queue_delayed_work(priv->workqueue,
1866 &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
1867}
1868
1869static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
1870{
1871 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1872 _3945.thermal_periodic.work);
1873
1874 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1875 return;
1876
1877 mutex_lock(&priv->mutex);
1878 iwl3945_reg_txpower_periodic(priv);
1879 mutex_unlock(&priv->mutex);
1880}
1881
1882/**
1883 * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
1884 * for the channel.
1885 *
1886 * This function is used when initializing channel-info structs.
1887 *
1888 * NOTE: These channel groups do *NOT* match the bands above!
1889 * These channel groups are based on factory-tested channels;
1890 * on A-band, EEPROM's "group frequency" entries represent the top
1891 * channel in each group 1-4. All B/G channels are in group 0.
1892 */
1893static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
1894 const struct iwl_channel_info *ch_info)
1895{
1896 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1897 struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1898 u8 group;
1899 u16 group_index = 0; /* based on factory calib frequencies */
1900 u8 grp_channel;
1901
1902	/* Find the group index for the channel; A-band channels use groups 1-4 */
1903 if (iwl_legacy_is_channel_a_band(ch_info)) {
1904 for (group = 1; group < 5; group++) {
1905 grp_channel = ch_grp[group].group_channel;
1906 if (ch_info->channel <= grp_channel) {
1907 group_index = group;
1908 break;
1909 }
1910 }
1911 /* group 4 has a few channels *above* its factory cal freq */
1912 if (group == 5)
1913 group_index = 4;
1914 } else
1915 group_index = 0; /* 2.4 GHz, group 0 */
1916
1917 IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
1918 group_index);
1919 return group_index;
1920}
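/*
 * Example (EEPROM contents are hypothetical): if groups 1-4 list top
 * channels of 43, 70, 124 and 200, A-band channel 64 lands in group 2,
 * while channel 212 runs past group 4 (the loop exits with group == 5)
 * and is clamped to group_index 4.  Any 2.4 GHz channel uses group 0.
 */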
1921
1922/**
1923 * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
1924 *
1925 * Interpolate to get nominal (i.e. at factory calibration temperature) index
1926 * into radio/DSP gain settings table for requested power.
1927 */
1928static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
1929 s8 requested_power,
1930 s32 setting_index, s32 *new_index)
1931{
1932 const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
1933 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1934 s32 index0, index1;
1935 s32 power = 2 * requested_power;
1936 s32 i;
1937 const struct iwl3945_eeprom_txpower_sample *samples;
1938 s32 gains0, gains1;
1939 s32 res;
1940 s32 denominator;
1941
1942 chnl_grp = &eeprom->groups[setting_index];
1943 samples = chnl_grp->samples;
1944 for (i = 0; i < 5; i++) {
1945 if (power == samples[i].power) {
1946 *new_index = samples[i].gain_index;
1947 return 0;
1948 }
1949 }
1950
1951 if (power > samples[1].power) {
1952 index0 = 0;
1953 index1 = 1;
1954 } else if (power > samples[2].power) {
1955 index0 = 1;
1956 index1 = 2;
1957 } else if (power > samples[3].power) {
1958 index0 = 2;
1959 index1 = 3;
1960 } else {
1961 index0 = 3;
1962 index1 = 4;
1963 }
1964
1965 denominator = (s32) samples[index1].power - (s32) samples[index0].power;
1966 if (denominator == 0)
1967 return -EINVAL;
1968 gains0 = (s32) samples[index0].gain_index * (1 << 19);
1969 gains1 = (s32) samples[index1].gain_index * (1 << 19);
1970 res = gains0 + (gains1 - gains0) *
1971 ((s32) power - (s32) samples[index0].power) / denominator +
1972 (1 << 18);
1973 *new_index = res >> 19;
1974 return 0;
1975}
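/*
 * Worked example of the fixed-point interpolation above (sample values
 * are made up): requesting 13 dBm gives power = 26 (half-dB units).
 * With samples[1] = {power 30, gain_index 20} and samples[2] =
 * {power 24, gain_index 32}, the bracketing pair is (1, 2),
 * denominator = 24 - 30 = -6, and
 * res = (20 << 19) + (12 << 19) * (26 - 30) / -6 + (1 << 18),
 * so *new_index = res >> 19 = 28.  The << 19 scaling plus the (1 << 18)
 * bias makes the divide round to nearest instead of truncating.
 */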
1976
1977static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
1978{
1979 u32 i;
1980 s32 rate_index;
1981 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1982 const struct iwl3945_eeprom_txpower_group *group;
1983
1984 IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
1985
1986 for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
1987 s8 *clip_pwrs; /* table of power levels for each rate */
1988 s8 satur_pwr; /* saturation power for each chnl group */
1989 group = &eeprom->groups[i];
1990
1991 /* sanity check on factory saturation power value */
1992 if (group->saturation_power < 40) {
1993 IWL_WARN(priv, "Error: saturation power is %d, "
1994 "less than minimum expected 40\n",
1995 group->saturation_power);
1996 return;
1997 }
1998
1999 /*
2000 * Derive requested power levels for each rate, based on
2001 * hardware capabilities (saturation power for band).
2002 * Basic value is 3dB down from saturation, with further
2003 * power reductions for highest 3 data rates. These
2004 * backoffs provide headroom for high rate modulation
2005 * power peaks, without too much distortion (clipping).
2006 */
2007 /* we'll fill in this array with h/w max power levels */
2008 clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
2009
2010 /* divide factory saturation power by 2 to find -3dB level */
2011 satur_pwr = (s8) (group->saturation_power >> 1);
2012
2013 /* fill in channel group's nominal powers for each rate */
2014 for (rate_index = 0;
2015 rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
2016 switch (rate_index) {
2017 case IWL_RATE_36M_INDEX_TABLE:
2018 if (i == 0) /* B/G */
2019 *clip_pwrs = satur_pwr;
2020 else /* A */
2021 *clip_pwrs = satur_pwr - 5;
2022 break;
2023 case IWL_RATE_48M_INDEX_TABLE:
2024 if (i == 0)
2025 *clip_pwrs = satur_pwr - 7;
2026 else
2027 *clip_pwrs = satur_pwr - 10;
2028 break;
2029 case IWL_RATE_54M_INDEX_TABLE:
2030 if (i == 0)
2031 *clip_pwrs = satur_pwr - 9;
2032 else
2033 *clip_pwrs = satur_pwr - 12;
2034 break;
2035 default:
2036 *clip_pwrs = satur_pwr;
2037 break;
2038 }
2039 }
2040 }
2041}
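/*
 * Example of the clip-power derivation above (the saturation value is
 * hypothetical): with group->saturation_power = 56, satur_pwr = 28, so a
 * B/G group gets clip powers of 28 for 36M and all lower rates, 21 for
 * 48M and 19 for 54M, while an A-band group gets 23, 18 and 16 for those
 * three top rates.
 */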
2042
2043/**
2044 * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
2045 *
2046 * Second pass (during init) to set up priv->channel_info
2047 *
2048 * Set up Tx-power settings in our channel info database for each VALID
2049 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
2050 * and current temperature.
2051 *
2052 * Since this is based on current temperature (at init time), these values may
2053 * not be valid for very long, but it gives us a starting/default point,
2054 * and allows us to perform an active (i.e. using Tx) scan.
2055 *
2056 * This does *not* write values to NIC, just sets up our internal table.
2057 */
2058int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
2059{
2060 struct iwl_channel_info *ch_info = NULL;
2061 struct iwl3945_channel_power_info *pwr_info;
2062 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
2063 int delta_index;
2064 u8 rate_index;
2065 u8 scan_tbl_index;
2066 const s8 *clip_pwrs; /* array of power levels for each rate */
2067 u8 gain, dsp_atten;
2068 s8 power;
2069 u8 pwr_index, base_pwr_index, a_band;
2070 u8 i;
2071 int temperature;
2072
2073 /* save temperature reference,
2074 * so we can determine next time to calibrate */
2075 temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
2076 priv->last_temperature = temperature;
2077
2078 iwl3945_hw_reg_init_channel_groups(priv);
2079
2080 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2081 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
2082 i++, ch_info++) {
2083 a_band = iwl_legacy_is_channel_a_band(ch_info);
2084 if (!iwl_legacy_is_channel_valid(ch_info))
2085 continue;
2086
2087 /* find this channel's channel group (*not* "band") index */
2088 ch_info->group_index =
2089 iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
2090
2091 /* Get this chnlgrp's rate->max/clip-powers table */
2092 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
2093
2094 /* calculate power index *adjustment* value according to
2095 * diff between current temperature and factory temperature */
2096 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
2097 eeprom->groups[ch_info->group_index].
2098 temperature);
2099
2100 IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
2101 ch_info->channel, delta_index, temperature +
2102 IWL_TEMP_CONVERT);
2103
2104 /* set tx power value for all OFDM rates */
2105 for (rate_index = 0; rate_index < IWL_OFDM_RATES;
2106 rate_index++) {
2107 s32 uninitialized_var(power_idx);
2108 int rc;
2109
2110 /* use channel group's clip-power table,
2111 * but don't exceed channel's max power */
2112 s8 pwr = min(ch_info->max_power_avg,
2113 clip_pwrs[rate_index]);
2114
2115 pwr_info = &ch_info->power_info[rate_index];
2116
2117 /* get base (i.e. at factory-measured temperature)
2118 * power table index for this rate's power */
2119 rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
2120 ch_info->group_index,
2121 &power_idx);
2122 if (rc) {
2123 IWL_ERR(priv, "Invalid power index\n");
2124 return rc;
2125 }
2126 pwr_info->base_power_index = (u8) power_idx;
2127
2128 /* temperature compensate */
2129 power_idx += delta_index;
2130
2131 /* stay within range of gain table */
2132 power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
2133
2134 /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
2135 pwr_info->requested_power = pwr;
2136 pwr_info->power_table_index = (u8) power_idx;
2137 pwr_info->tpc.tx_gain =
2138 power_gain_table[a_band][power_idx].tx_gain;
2139 pwr_info->tpc.dsp_atten =
2140 power_gain_table[a_band][power_idx].dsp_atten;
2141 }
2142
2143 /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
2144 pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
2145 power = pwr_info->requested_power +
2146 IWL_CCK_FROM_OFDM_POWER_DIFF;
2147 pwr_index = pwr_info->power_table_index +
2148 IWL_CCK_FROM_OFDM_INDEX_DIFF;
2149 base_pwr_index = pwr_info->base_power_index +
2150 IWL_CCK_FROM_OFDM_INDEX_DIFF;
2151
2152 /* stay within table range */
2153 pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
2154 gain = power_gain_table[a_band][pwr_index].tx_gain;
2155 dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
2156
2157 /* fill each CCK rate's iwl3945_channel_power_info structure
2158 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
2159 * NOTE: CCK rates start at end of OFDM rates! */
2160 for (rate_index = 0;
2161 rate_index < IWL_CCK_RATES; rate_index++) {
2162 pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
2163 pwr_info->requested_power = power;
2164 pwr_info->power_table_index = pwr_index;
2165 pwr_info->base_power_index = base_pwr_index;
2166 pwr_info->tpc.tx_gain = gain;
2167 pwr_info->tpc.dsp_atten = dsp_atten;
2168 }
2169
2170 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
2171 for (scan_tbl_index = 0;
2172 scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
2173 s32 actual_index = (scan_tbl_index == 0) ?
2174 IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
2175 iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
2176 actual_index, clip_pwrs, ch_info, a_band);
2177 }
2178 }
2179
2180 return 0;
2181}
2182
2183int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
2184{
2185 int rc;
2186
2187 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
2188 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
2189 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2190 if (rc < 0)
2191 IWL_ERR(priv, "Can't stop Rx DMA.\n");
2192
2193 return 0;
2194}
2195
2196int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2197{
2198 int txq_id = txq->q.id;
2199
2200 struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
2201
2202 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
2203
2204 iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
2205 iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
2206
2207 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
2208 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2209 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2210 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
2211 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
2212 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
2213
2214 /* fake read to flush all prev. writes */
2215 iwl_read32(priv, FH39_TSSR_CBB_BASE);
2216
2217 return 0;
2218}
2219
2220/*
2221 * HCMD utils
2222 */
2223static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
2224{
2225 switch (cmd_id) {
2226 case REPLY_RXON:
2227 return sizeof(struct iwl3945_rxon_cmd);
2228 case POWER_TABLE_CMD:
2229 return sizeof(struct iwl3945_powertable_cmd);
2230 default:
2231 return len;
2232 }
2233}
2234
2235
2236static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
2237 u8 *data)
2238{
2239 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
2240 addsta->mode = cmd->mode;
2241 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2242 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
2243 addsta->station_flags = cmd->station_flags;
2244 addsta->station_flags_msk = cmd->station_flags_msk;
2245 addsta->tid_disable_tx = cpu_to_le16(0);
2246 addsta->rate_n_flags = cmd->rate_n_flags;
2247 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2248 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2249 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2250
2251 return (u16)sizeof(struct iwl3945_addsta_cmd);
2252}
2253
2254static int iwl3945_add_bssid_station(struct iwl_priv *priv,
2255 const u8 *addr, u8 *sta_id_r)
2256{
2257 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2258 int ret;
2259 u8 sta_id;
2260 unsigned long flags;
2261
2262 if (sta_id_r)
2263 *sta_id_r = IWL_INVALID_STATION;
2264
2265 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
2266 if (ret) {
2267 IWL_ERR(priv, "Unable to add station %pM\n", addr);
2268 return ret;
2269 }
2270
2271 if (sta_id_r)
2272 *sta_id_r = sta_id;
2273
2274 spin_lock_irqsave(&priv->sta_lock, flags);
2275 priv->stations[sta_id].used |= IWL_STA_LOCAL;
2276 spin_unlock_irqrestore(&priv->sta_lock, flags);
2277
2278 return 0;
2279}
2280static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2281 struct ieee80211_vif *vif, bool add)
2282{
2283 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2284 int ret;
2285
2286 if (add) {
2287 ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
2288 &vif_priv->ibss_bssid_sta_id);
2289 if (ret)
2290 return ret;
2291
2292 iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
2293 (priv->band == IEEE80211_BAND_5GHZ) ?
2294 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
2295 iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
2296
2297 return 0;
2298 }
2299
2300 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
2301 vif->bss_conf.bssid);
2302}
2303
2304/**
2305 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
2306 */
2307int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2308{
2309 int rc, i, index, prev_index;
2310 struct iwl3945_rate_scaling_cmd rate_cmd = {
2311 .reserved = {0, 0, 0},
2312 };
2313 struct iwl3945_rate_scaling_info *table = rate_cmd.table;
2314
2315 for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
2316 index = iwl3945_rates[i].table_rs_index;
2317
2318 table[index].rate_n_flags =
2319 iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
2320 table[index].try_cnt = priv->retry_rate;
2321 prev_index = iwl3945_get_prev_ieee_rate(i);
2322 table[index].next_rate_index =
2323 iwl3945_rates[prev_index].table_rs_index;
2324 }
2325
2326 switch (priv->band) {
2327 case IEEE80211_BAND_5GHZ:
2328 IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
2329 /* If one of the following CCK rates is used,
2330 * have it fall back to the 6M OFDM rate */
2331 for (i = IWL_RATE_1M_INDEX_TABLE;
2332 i <= IWL_RATE_11M_INDEX_TABLE; i++)
2333 table[i].next_rate_index =
2334 iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
2335
2336 /* Don't fall back to CCK rates */
2337 table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
2338 IWL_RATE_9M_INDEX_TABLE;
2339
2340 /* Don't drop out of OFDM rates */
2341 table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
2342 iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
2343 break;
2344
2345 case IEEE80211_BAND_2GHZ:
2346 IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
2347 /* If an OFDM rate is used, have it fall back to the
2348 * 1M CCK rates */
2349
2350 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2351 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2352
2353 index = IWL_FIRST_CCK_RATE;
2354 for (i = IWL_RATE_6M_INDEX_TABLE;
2355 i <= IWL_RATE_54M_INDEX_TABLE; i++)
2356 table[i].next_rate_index =
2357 iwl3945_rates[index].table_rs_index;
2358
2359 index = IWL_RATE_11M_INDEX_TABLE;
2360 /* CCK shouldn't fall back to OFDM... */
2361 table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
2362 }
2363 break;
2364
2365 default:
2366 WARN_ON(1);
2367 break;
2368 }
2369
2370 /* Update the rate scaling for control frame Tx */
2371 rate_cmd.table_id = 0;
2372 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2373 &rate_cmd);
2374 if (rc)
2375 return rc;
2376
2377 /* Update the rate scaling for data frame Tx */
2378 rate_cmd.table_id = 1;
2379 return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2380 &rate_cmd);
2381}
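/*
 * Net effect for 5 GHz above (assuming IWL_FIRST_OFDM_RATE is the 6M
 * entry, as its name suggests): every CCK entry falls back to 6M OFDM,
 * 12M falls back to 9M rather than into CCK, and 6M is pinned to the
 * first OFDM entry, so the fallback chain never leaves the OFDM rates.
 */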
2382
2383/* Called when initializing driver */
2384int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2385{
2386 memset((void *)&priv->hw_params, 0,
2387 sizeof(struct iwl_hw_params));
2388
2389 priv->_3945.shared_virt =
2390 dma_alloc_coherent(&priv->pci_dev->dev,
2391 sizeof(struct iwl3945_shared),
2392 &priv->_3945.shared_phys, GFP_KERNEL);
2393 if (!priv->_3945.shared_virt) {
2394 IWL_ERR(priv, "failed to allocate pci memory\n");
2395 return -ENOMEM;
2396 }
2397
2398 /* Assign number of Usable TX queues */
2399 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
2400
2401 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2402 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
2403 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2404 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2405 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
2406 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
2407
2408 priv->sta_key_max_num = STA_KEY_MAX_NUM;
2409
2410 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2411 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
2412 priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
2413
2414 return 0;
2415}
2416
2417unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
2418 struct iwl3945_frame *frame, u8 rate)
2419{
2420 struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
2421 unsigned int frame_size;
2422
2423 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
2424 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2425
2426 tx_beacon_cmd->tx.sta_id =
2427 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2428 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2429
2430 frame_size = iwl3945_fill_beacon_frame(priv,
2431 tx_beacon_cmd->frame,
2432 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2433
2434 BUG_ON(frame_size > MAX_MPDU_SIZE);
2435 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2436
2437 tx_beacon_cmd->tx.rate = rate;
2438 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2439 TX_CMD_FLG_TSF_MSK);
2440
2441 /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
2442 tx_beacon_cmd->tx.supp_rates[0] =
2443 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2444
2445 tx_beacon_cmd->tx.supp_rates[1] =
2446 (IWL_CCK_BASIC_RATES_MASK & 0xF);
2447
2448 return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
2449}
2450
2451void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
2452{
2453 priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
2454 priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
2455}
2456
2457void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
2458{
2459 INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
2460 iwl3945_bg_reg_txpower_periodic);
2461}
2462
2463void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
2464{
2465 cancel_delayed_work(&priv->_3945.thermal_periodic);
2466}
2467
2468/* check contents of special bootstrap uCode SRAM */
2469static int iwl3945_verify_bsm(struct iwl_priv *priv)
2470{
2471 __le32 *image = priv->ucode_boot.v_addr;
2472 u32 len = priv->ucode_boot.len;
2473 u32 reg;
2474 u32 val;
2475
2476 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
2477
2478 /* verify BSM SRAM contents */
2479 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
2480 for (reg = BSM_SRAM_LOWER_BOUND;
2481 reg < BSM_SRAM_LOWER_BOUND + len;
2482 reg += sizeof(u32), image++) {
2483 val = iwl_legacy_read_prph(priv, reg);
2484 if (val != le32_to_cpu(*image)) {
2485 IWL_ERR(priv, "BSM uCode verification failed at "
2486 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
2487 BSM_SRAM_LOWER_BOUND,
2488 reg - BSM_SRAM_LOWER_BOUND, len,
2489 val, le32_to_cpu(*image));
2490 return -EIO;
2491 }
2492 }
2493
2494 IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
2495
2496 return 0;
2497}
2498
2499
2500/******************************************************************************
2501 *
2502 * EEPROM related functions
2503 *
2504 ******************************************************************************/
2505
2506/*
2507 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
2508 * embedded controller) as EEPROM reader; each read is a series of pulses
2509 * to/from the EEPROM chip, not a single event, so even reads could conflict
2510 * if they weren't arbitrated by some ownership mechanism. Here, the driver
2511 * simply claims ownership, which should be safe when this function is called
2512 * (i.e. before loading uCode!).
2513 */
2514static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
2515{
2516 _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
2517 return 0;
2518}
2519
2520
2521static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
2522{
2523 return;
2524}
2525
2526/**
2527 * iwl3945_load_bsm - Load bootstrap instructions
2528 *
2529 * BSM operation:
2530 *
2531 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
2532 * in special SRAM that does not power down during RFKILL. When powering back
2533 * up after power-saving sleeps (or during initial uCode load), the BSM loads
2534 * the bootstrap program into the on-board processor, and starts it.
2535 *
2536 * The bootstrap program loads (via DMA) instructions and data for a new
2537 * program from host DRAM locations indicated by the host driver in the
2538 * BSM_DRAM_* registers. Once the new program is loaded, it starts
2539 * automatically.
2540 *
2541 * When initializing the NIC, the host driver points the BSM to the
2542 * "initialize" uCode image. This uCode sets up some internal data, then
2543 * notifies host via "initialize alive" that it is complete.
2544 *
2545 * The host then replaces the BSM_DRAM_* pointer values to point to the
2546 * normal runtime uCode instructions and a backup uCode data cache buffer
2547 * (filled initially with starting data values for the on-board processor),
2548 * then triggers the "initialize" uCode to load and launch the runtime uCode,
2549 * which begins normal operation.
2550 *
2551 * When doing a power-save shutdown, runtime uCode saves data SRAM into
2552 * the backup data cache in DRAM before SRAM is powered down.
2553 *
2554 * When powering back up, the BSM loads the bootstrap program. This reloads
2555 * the runtime uCode instructions and the backup data cache into SRAM,
2556 * and re-launches the runtime uCode from where it left off.
2557 */
2558static int iwl3945_load_bsm(struct iwl_priv *priv)
2559{
2560 __le32 *image = priv->ucode_boot.v_addr;
2561 u32 len = priv->ucode_boot.len;
2562 dma_addr_t pinst;
2563 dma_addr_t pdata;
2564 u32 inst_len;
2565 u32 data_len;
2566 int rc;
2567 int i;
2568 u32 done;
2569 u32 reg_offset;
2570
2571 IWL_DEBUG_INFO(priv, "Begin load bsm\n");
2572
2573 /* make sure bootstrap program is no larger than BSM's SRAM size */
2574 if (len > IWL39_MAX_BSM_SIZE)
2575 return -EINVAL;
2576
2577 /* Tell bootstrap uCode where to find the "Initialize" uCode
2578 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
2579 * NOTE: iwl3945_initialize_alive_start() will replace these values,
2580 * after the "initialize" uCode has run, to point to
2581 * runtime/protocol instructions and backup data cache. */
2582 pinst = priv->ucode_init.p_addr;
2583 pdata = priv->ucode_init_data.p_addr;
2584 inst_len = priv->ucode_init.len;
2585 data_len = priv->ucode_init_data.len;
2586
2587 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2588 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2589 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2590 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2591
2592 /* Fill BSM memory with bootstrap instructions */
2593 for (reg_offset = BSM_SRAM_LOWER_BOUND;
2594 reg_offset < BSM_SRAM_LOWER_BOUND + len;
2595 reg_offset += sizeof(u32), image++)
2596 _iwl_legacy_write_prph(priv, reg_offset,
2597 le32_to_cpu(*image));
2598
2599 rc = iwl3945_verify_bsm(priv);
2600 if (rc)
2601 return rc;
2602
2603 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2604 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
2605 iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
2606 IWL39_RTC_INST_LOWER_BOUND);
2607 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2608
2609 /* Load bootstrap code into instruction SRAM now,
2610 * to prepare to load "initialize" uCode */
2611 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2612 BSM_WR_CTRL_REG_BIT_START);
2613
2614 /* Wait for load of bootstrap uCode to finish */
2615 for (i = 0; i < 100; i++) {
2616 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
2617 if (!(done & BSM_WR_CTRL_REG_BIT_START))
2618 break;
2619 udelay(10);
2620 }
2621 if (i < 100)
2622 IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
2623 else {
2624 IWL_ERR(priv, "BSM write did not complete!\n");
2625 return -EIO;
2626 }
2627
2628 /* Enable future boot loads whenever power management unit triggers it
2629 * (e.g. when powering back up after power-save shutdown) */
2630 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2631 BSM_WR_CTRL_REG_BIT_START_EN);
2632
2633 return 0;
2634}
2635
2636static struct iwl_hcmd_ops iwl3945_hcmd = {
2637 .rxon_assoc = iwl3945_send_rxon_assoc,
2638 .commit_rxon = iwl3945_commit_rxon,
2639};
2640
2641static struct iwl_lib_ops iwl3945_lib = {
2642 .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
2643 .txq_free_tfd = iwl3945_hw_txq_free_tfd,
2644 .txq_init = iwl3945_hw_tx_queue_init,
2645 .load_ucode = iwl3945_load_bsm,
2646 .dump_nic_error_log = iwl3945_dump_nic_error_log,
2647 .apm_ops = {
2648 .init = iwl3945_apm_init,
2649 .config = iwl3945_nic_config,
2650 },
2651 .eeprom_ops = {
2652 .regulatory_bands = {
2653 EEPROM_REGULATORY_BAND_1_CHANNELS,
2654 EEPROM_REGULATORY_BAND_2_CHANNELS,
2655 EEPROM_REGULATORY_BAND_3_CHANNELS,
2656 EEPROM_REGULATORY_BAND_4_CHANNELS,
2657 EEPROM_REGULATORY_BAND_5_CHANNELS,
2658 EEPROM_REGULATORY_BAND_NO_HT40,
2659 EEPROM_REGULATORY_BAND_NO_HT40,
2660 },
2661 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
2662 .release_semaphore = iwl3945_eeprom_release_semaphore,
2663 },
2664 .send_tx_power = iwl3945_send_tx_power,
2665 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
2666
2667 .debugfs_ops = {
2668 .rx_stats_read = iwl3945_ucode_rx_stats_read,
2669 .tx_stats_read = iwl3945_ucode_tx_stats_read,
2670 .general_stats_read = iwl3945_ucode_general_stats_read,
2671 },
2672};
2673
2674static const struct iwl_legacy_ops iwl3945_legacy_ops = {
2675 .post_associate = iwl3945_post_associate,
2676 .config_ap = iwl3945_config_ap,
2677 .manage_ibss_station = iwl3945_manage_ibss_station,
2678};
2679
2680static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2681 .get_hcmd_size = iwl3945_get_hcmd_size,
2682 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2683 .request_scan = iwl3945_request_scan,
2684 .post_scan = iwl3945_post_scan,
2685};
2686
2687static const struct iwl_ops iwl3945_ops = {
2688 .lib = &iwl3945_lib,
2689 .hcmd = &iwl3945_hcmd,
2690 .utils = &iwl3945_hcmd_utils,
2691 .led = &iwl3945_led_ops,
2692 .legacy = &iwl3945_legacy_ops,
2693 .ieee80211_ops = &iwl3945_hw_ops,
2694};
2695
2696static struct iwl_base_params iwl3945_base_params = {
2697 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2698 .num_of_queues = IWL39_NUM_QUEUES,
2699 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2700 .set_l0s = false,
2701 .use_bsm = true,
2702 .led_compensation = 64,
2703 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2704};
2705
2706static struct iwl_cfg iwl3945_bg_cfg = {
2707 .name = "3945BG",
2708 .fw_name_pre = IWL3945_FW_PRE,
2709 .ucode_api_max = IWL3945_UCODE_API_MAX,
2710 .ucode_api_min = IWL3945_UCODE_API_MIN,
2711 .sku = IWL_SKU_G,
2712 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2713 .ops = &iwl3945_ops,
2714 .mod_params = &iwl3945_mod_params,
2715 .base_params = &iwl3945_base_params,
2716 .led_mode = IWL_LED_BLINK,
2717};
2718
2719static struct iwl_cfg iwl3945_abg_cfg = {
2720 .name = "3945ABG",
2721 .fw_name_pre = IWL3945_FW_PRE,
2722 .ucode_api_max = IWL3945_UCODE_API_MAX,
2723 .ucode_api_min = IWL3945_UCODE_API_MIN,
2724 .sku = IWL_SKU_A|IWL_SKU_G,
2725 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2726 .ops = &iwl3945_ops,
2727 .mod_params = &iwl3945_mod_params,
2728 .base_params = &iwl3945_base_params,
2729 .led_mode = IWL_LED_BLINK,
2730};
2731
2732DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
2733 {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
2734 {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
2735 {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
2736 {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
2737 {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
2738 {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
2739 {0}
2740};
2741
2742MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
new file mode 100644
index 00000000000..b118b59b71d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.h
@@ -0,0 +1,308 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-3945.h) for driver implementation definitions.
28 * Please use iwl-3945-commands.h for uCode API definitions.
29 * Please use iwl-3945-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_3945_h__
33#define __iwl_3945_h__
34
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h>
38
39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern const struct pci_device_id iwl3945_hw_card_ids[];
41
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-3945-hw.h"
46#include "iwl-debug.h"
47#include "iwl-power.h"
48#include "iwl-dev.h"
49#include "iwl-led.h"
50
51/* Highest firmware API version supported */
52#define IWL3945_UCODE_API_MAX 2
53
54/* Lowest firmware API version supported */
55#define IWL3945_UCODE_API_MIN 1
56
57#define IWL3945_FW_PRE "iwlwifi-3945-"
58#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
59#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
60
61/* Default noise level to report when noise measurement is not available.
62 * This may be because we're:
63 * 1) Not associated (4965, no beacon statistics being sent to driver)
64 * 2) Scanning (noise measurement does not apply to associated channel)
65 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
66 * Use default noise value of -127 ... this is below the range of measurable
67 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
68 * Also, -127 works better than 0 when averaging frames with/without
69 * noise info (e.g. averaging might be done in app); measured dBm values are
70 * always negative ... using a negative value as the default keeps all
71 * averages within an s8's (used in some apps) range of negative values. */
72#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
73
74/* Module parameters accessible from iwl-*.c */
75extern struct iwl_mod_params iwl3945_mod_params;
76
77struct iwl3945_rate_scale_data {
78 u64 data;
79 s32 success_counter;
80 s32 success_ratio;
81 s32 counter;
82 s32 average_tpt;
83 unsigned long stamp;
84};
85
86struct iwl3945_rs_sta {
87 spinlock_t lock;
88 struct iwl_priv *priv;
89 s32 *expected_tpt;
90 unsigned long last_partial_flush;
91 unsigned long last_flush;
92 u32 flush_time;
93 u32 last_tx_packets;
94 u32 tx_packets;
95 u8 tgg;
96 u8 flush_pending;
97 u8 start_rate;
98 struct timer_list rate_scale_flush;
99 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
100#ifdef CONFIG_MAC80211_DEBUGFS
101 struct dentry *rs_sta_dbgfs_stats_table_file;
102#endif
103
104 /* used to be in sta_info */
105 int last_txrate_idx;
106};
107
108
109/*
110 * The common struct MUST be first because it is shared between
111 * 3945 and 4965!
112 */
113struct iwl3945_sta_priv {
114 struct iwl_station_priv_common common;
115 struct iwl3945_rs_sta rs_sta;
116};
117
118enum iwl3945_antenna {
119 IWL_ANTENNA_DIVERSITY,
120 IWL_ANTENNA_MAIN,
121 IWL_ANTENNA_AUX
122};
123
124/*
125 * RTS threshold here is total size [2347] minus 4 FCS bytes
126 * Per spec:
127 * a value of 0 means RTS on all data/management packets
128 * a value > max MSDU size means no RTS
129 * else RTS for data/management frames where MPDU is larger
130 * than RTS value.
131 */
132#define DEFAULT_RTS_THRESHOLD 2347U
133#define MIN_RTS_THRESHOLD 0U
134#define MAX_RTS_THRESHOLD 2347U
135#define MAX_MSDU_SIZE 2304U
136#define MAX_MPDU_SIZE 2346U
137#define DEFAULT_BEACON_INTERVAL 100U
138#define DEFAULT_SHORT_RETRY_LIMIT 7U
139#define DEFAULT_LONG_RETRY_LIMIT 4U
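
/*
 * Illustrative sketch of the RTS rule described in the comment above;
 * iwl3945_use_rts() is a hypothetical helper used only to spell out the
 * three cases, not a function provided by this driver.
 */
static inline bool iwl3945_use_rts(u32 rts_threshold, u32 mpdu_len)
{
	if (rts_threshold == 0)
		return true;			/* 0: RTS for all data/mgmt frames */
	if (rts_threshold > MAX_MSDU_SIZE)
		return false;			/* above max MSDU size: RTS disabled */
	return mpdu_len > rts_threshold;	/* RTS only for larger MPDUs */
}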
140
141#define IWL_TX_FIFO_AC0 0
142#define IWL_TX_FIFO_AC1 1
143#define IWL_TX_FIFO_AC2 2
144#define IWL_TX_FIFO_AC3 3
145#define IWL_TX_FIFO_HCCA_1 5
146#define IWL_TX_FIFO_HCCA_2 6
147#define IWL_TX_FIFO_NONE 7
148
149#define IEEE80211_DATA_LEN 2304
150#define IEEE80211_4ADDR_LEN 30
151#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
152#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
153
154struct iwl3945_frame {
155 union {
156 struct ieee80211_hdr frame;
157 struct iwl3945_tx_beacon_cmd beacon;
158 u8 raw[IEEE80211_FRAME_LEN];
159 u8 cmd[360];
160 } u;
161 struct list_head list;
162};
163
164#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
165#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
166#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
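/*
 * Worked example: for a sequence-control field of 0x1234, SEQ_TO_SN()
 * keeps bits 15:4 and shifts them down, giving sequence number 0x123;
 * SN_TO_SEQ(0x123) rebuilds 0x1230 with the fragment bits cleared.
 */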
167
168#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
169#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
170#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
171
172#define IWL_SUPPORTED_RATES_IE_LEN 8
173
174#define SCAN_INTERVAL 100
175
176#define MAX_TID_COUNT 9
177
178#define IWL_INVALID_RATE 0xFF
179#define IWL_INVALID_VALUE -1
180
181#define STA_PS_STATUS_WAKE 0
182#define STA_PS_STATUS_SLEEP 1
183
184struct iwl3945_ibss_seq {
185 u8 mac[ETH_ALEN];
186 u16 seq_num;
187 u16 frag_num;
188 unsigned long packet_time;
189 struct list_head list;
190};
191
192#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
193 x->u.rx_frame.stats.payload + \
194 x->u.rx_frame.stats.phy_count))
195#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
196 IWL_RX_HDR(x)->payload + \
197 le16_to_cpu(IWL_RX_HDR(x)->len)))
198#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
199#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
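/*
 * Reading guide for the accessors above (derived from the macros
 * themselves): a received 3945 frame is laid out as the rx_frame stats
 * block, phy_count bytes of PHY data, the frame header, len bytes of
 * payload, and finally the rx_frame_end record; IWL_RX_HDR() skips the
 * variable-length PHY data and IWL_RX_END() additionally skips the
 * variable-length payload.
 */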
200
201
202/******************************************************************************
203 *
204 * Functions implemented in iwl3945-base.c which are forward declared here
205 * for use by iwl-*.c
206 *
207 *****************************************************************************/
208extern int iwl3945_calc_db_from_ratio(int sig_ratio);
209extern void iwl3945_rx_replenish(void *data);
210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
212 struct ieee80211_hdr *hdr, int left);
213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
214 char **buf, bool display);
215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
216
217/******************************************************************************
218 *
219 * Functions implemented in iwl-[34]*.c which are forward declared here
220 * for use by iwl3945-base.c
221 *
222 * NOTE: The implementation of these functions is hardware specific,
223 * which is why they are in the hardware-specific files (vs. iwl-base.c).
224 *
225 * Naming convention --
226 * iwl3945_ <-- It's part of iwlwifi (the generic, non-hardware-specific prefix)
227 * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
228 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
229 * iwl3945_bg_ <-- Called from work queue context
230 * iwl3945_mac_ <-- mac80211 callback
231 *
232 ****************************************************************************/
233extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
234extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
235extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
236extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
237extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
238extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
239extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
240extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
241extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
242extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
243extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
244 struct iwl_tx_queue *txq,
245 dma_addr_t addr, u16 len,
246 u8 reset, u8 pad);
247extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
248 struct iwl_tx_queue *txq);
249extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
250extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
251 struct iwl_tx_queue *txq);
252extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
253 struct iwl3945_frame *frame, u8 rate);
254void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
255 struct iwl_device_cmd *cmd,
256 struct ieee80211_tx_info *info,
257 struct ieee80211_hdr *hdr,
258 int sta_id, int tx_id);
259extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
260extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
261extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
262 struct iwl_rx_mem_buffer *rxb);
263void iwl3945_reply_statistics(struct iwl_priv *priv,
264 struct iwl_rx_mem_buffer *rxb);
265extern void iwl3945_disable_events(struct iwl_priv *priv);
266extern int iwl4965_get_temperature(const struct iwl_priv *priv);
267extern void iwl3945_post_associate(struct iwl_priv *priv);
268extern void iwl3945_config_ap(struct iwl_priv *priv);
269
270extern int iwl3945_commit_rxon(struct iwl_priv *priv,
271 struct iwl_rxon_context *ctx);
272
273/**
274 * iwl3945_hw_find_station - Find station id for a given BSSID
275 * @bssid: MAC address of the station to find
276 *
277 * NOTE: This should not be hardware specific but the code has
278 * not yet been merged into a single common layer for managing the
279 * station tables.
280 */
281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
282
283extern struct ieee80211_ops iwl3945_hw_ops;
284
285/*
286 * Forward declare iwl-3945.c functions for iwl3945-base.c
287 */
288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
290extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
291extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
292
293extern const struct iwl_channel_info *iwl3945_get_channel_info(
294 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
295
296extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
297
298/* scanning */
299int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
300void iwl3945_post_scan(struct iwl_priv *priv);
301
302/* rates */
303extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
304
305/* Requires full declaration of iwl_priv before including */
306#include "iwl-io.h"
307
308#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
new file mode 100644
index 00000000000..162d877e686
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
@@ -0,0 +1,967 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/slab.h>
64#include <net/mac80211.h>
65
66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-4965-calib.h"
69
70/*****************************************************************************
71 * INIT calibrations framework
72 *****************************************************************************/
73
74struct statistics_general_data {
75 u32 beacon_silence_rssi_a;
76 u32 beacon_silence_rssi_b;
77 u32 beacon_silence_rssi_c;
78 u32 beacon_energy_a;
79 u32 beacon_energy_b;
80 u32 beacon_energy_c;
81};
82
83void iwl4965_calib_free_results(struct iwl_priv *priv)
84{
85 int i;
86
87 for (i = 0; i < IWL_CALIB_MAX; i++) {
88 kfree(priv->calib_results[i].buf);
89 priv->calib_results[i].buf = NULL;
90 priv->calib_results[i].buf_len = 0;
91 }
92}
93
94/*****************************************************************************
95 * RUNTIME calibrations framework
96 *****************************************************************************/
97
98/* "false alarms" are signals that our DSP tries to lock onto,
99 * but then determines that they are either noise, or transmissions
100 * from a distant wireless network (also "noise", really) that get
101 * "stepped on" by stronger transmissions within our own network.
102 * This algorithm attempts to set a sensitivity level that is high
103 * enough to receive all of our own network traffic, but not so
104 * high that our DSP gets too busy trying to lock onto non-network
105 * activity/noise. */
106static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
107 u32 norm_fa,
108 u32 rx_enable_time,
109 struct statistics_general_data *rx_info)
110{
111 u32 max_nrg_cck = 0;
112 int i = 0;
113 u8 max_silence_rssi = 0;
114 u32 silence_ref = 0;
115 u8 silence_rssi_a = 0;
116 u8 silence_rssi_b = 0;
117 u8 silence_rssi_c = 0;
118 u32 val;
119
120 /* "false_alarms" values below are cross-multiplications to assess the
121 * number of false alarms within the measured period of actual Rx
122 * (Rx is off when we're txing), vs. the min/max expected false alarms
123 * (some should be expected if rx is sensitive enough) in a
124 * hypothetical listening period of 200 time units (TU), 204.8 msec:
125 *
126 * MIN_FA / 200-TU period < measured FA / actual-rx-time < MAX_FA / 200-TU period
127 *
128 */
129 u32 false_alarms = norm_fa * 200 * 1024;
130 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
131 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
132 struct iwl_sensitivity_data *data = NULL;
133 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
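	/*
	 * Worked example of the cross-multiplication (illustrative numbers):
	 * if norm_fa = 10 false alarms were counted during an actual Rx time
	 * of 102400 usec, then false_alarms = 10 * 204800 = 2048000, and the
	 * comparisons below effectively test the observed rate 10/102400
	 * against MIN_FA_CCK/204800 and MAX_FA_CCK/204800.
	 */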
134
135 data = &(priv->sensitivity_data);
136
137 data->nrg_auto_corr_silence_diff = 0;
138
139 /* Find max silence rssi among all 3 receivers.
140 * This is background noise, which may include transmissions from other
141 * networks, measured during silence before our network's beacon */
142 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
143 ALL_BAND_FILTER) >> 8);
144 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
145 ALL_BAND_FILTER) >> 8);
146 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
147 ALL_BAND_FILTER) >> 8);
148
149 val = max(silence_rssi_b, silence_rssi_c);
150 max_silence_rssi = max(silence_rssi_a, (u8) val);
151
152 /* Store silence rssi in 20-beacon history table */
153 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
154 data->nrg_silence_idx++;
155 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
156 data->nrg_silence_idx = 0;
157
158 /* Find max silence rssi across 20 beacon history */
159 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
160 val = data->nrg_silence_rssi[i];
161 silence_ref = max(silence_ref, val);
162 }
163 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
164 silence_rssi_a, silence_rssi_b, silence_rssi_c,
165 silence_ref);
166
167 /* Find max rx energy (min value!) among all 3 receivers,
168 * measured during beacon frame.
169 * Save it in 10-beacon history table. */
170 i = data->nrg_energy_idx;
171 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
172 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
173
174 data->nrg_energy_idx++;
175 if (data->nrg_energy_idx >= 10)
176 data->nrg_energy_idx = 0;
177
178 /* Find min rx energy (max value) across 10 beacon history.
179 * This is the minimum signal level that we want to receive well.
180 * Add backoff (margin so we don't miss slightly lower energy frames).
181 * This establishes an upper bound (min value) for energy threshold. */
182 max_nrg_cck = data->nrg_value[0];
183 for (i = 1; i < 10; i++)
184 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
185 max_nrg_cck += 6;
186
187 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
188 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
189 rx_info->beacon_energy_c, max_nrg_cck - 6);
190
191 /* Count number of consecutive beacons with fewer-than-desired
192 * false alarms. */
193 if (false_alarms < min_false_alarms)
194 data->num_in_cck_no_fa++;
195 else
196 data->num_in_cck_no_fa = 0;
197 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
198 data->num_in_cck_no_fa);
199
200 /* If we got too many false alarms this time, reduce sensitivity */
201 if ((false_alarms > max_false_alarms) &&
202 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
203 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
204 false_alarms, max_false_alarms);
205 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
206 data->nrg_curr_state = IWL_FA_TOO_MANY;
207 /* Store for "fewer than desired" on later beacon */
208 data->nrg_silence_ref = silence_ref;
209
210 /* increase energy threshold (reduce nrg value)
211 * to decrease sensitivity */
212 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
213 /* Else if we got fewer than desired, increase sensitivity */
214 } else if (false_alarms < min_false_alarms) {
215 data->nrg_curr_state = IWL_FA_TOO_FEW;
216
217 /* Compare silence level with silence level for most recent
218 * healthy number or too many false alarms */
219 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
220 (s32)silence_ref;
221
222 IWL_DEBUG_CALIB(priv,
223 "norm FA %u < min FA %u, silence diff %d\n",
224 false_alarms, min_false_alarms,
225 data->nrg_auto_corr_silence_diff);
226
227 /* Increase value to increase sensitivity, but only if:
228 * 1a) previous beacon did *not* have *too many* false alarms
229 * 1b) AND there's a significant difference in Rx levels
230 * from a previous beacon with too many, or healthy # FAs
231 * OR 2) We've seen a lot of beacons (100) with too few
232 * false alarms */
233 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
234 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
235 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
236
237 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
238 /* Increase nrg value to increase sensitivity */
239 val = data->nrg_th_cck + NRG_STEP_CCK;
240 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
241 } else {
242 IWL_DEBUG_CALIB(priv,
243 "... but not changing sensitivity\n");
244 }
245
246 /* Else we got a healthy number of false alarms, keep status quo */
247 } else {
248 IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
249 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
250
251 /* Store for use in "fewer than desired" with later beacon */
252 data->nrg_silence_ref = silence_ref;
253
254 /* If previous beacon had too many false alarms,
255 * give it some extra margin by reducing sensitivity again
256 * (but don't go below measured energy of desired Rx) */
257 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
258 IWL_DEBUG_CALIB(priv, "... increasing margin\n");
259 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
260 data->nrg_th_cck -= NRG_MARGIN;
261 else
262 data->nrg_th_cck = max_nrg_cck;
263 }
264 }
265
266 /* Make sure the energy threshold does not go above the measured
267 * energy of the desired Rx signals (reduced by backoff margin),
268 * or else we might start missing Rx frames.
269 * Lower value is higher energy, so we use max()!
270 */
271 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
272 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
273
274 data->nrg_prev_state = data->nrg_curr_state;
275
276 /* Auto-correlation CCK algorithm */
277 if (false_alarms > min_false_alarms) {
278
279 /* increase auto_corr values to decrease sensitivity
280 * so the DSP won't be disturbed by the noise
281 */
282 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
283 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
284 else {
285 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
286 data->auto_corr_cck =
287 min((u32)ranges->auto_corr_max_cck, val);
288 }
289 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
290 data->auto_corr_cck_mrc =
291 min((u32)ranges->auto_corr_max_cck_mrc, val);
292 } else if ((false_alarms < min_false_alarms) &&
293 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
294 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
295
296 /* Decrease auto_corr values to increase sensitivity */
297 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
298 data->auto_corr_cck =
299 max((u32)ranges->auto_corr_min_cck, val);
300 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
301 data->auto_corr_cck_mrc =
302 max((u32)ranges->auto_corr_min_cck_mrc, val);
303 }
304
305 return 0;
306}
307
308
309static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
310 u32 norm_fa,
311 u32 rx_enable_time)
312{
313 u32 val;
314 u32 false_alarms = norm_fa * 200 * 1024;
315 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
316 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
317 struct iwl_sensitivity_data *data = NULL;
318 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
319
320 data = &(priv->sensitivity_data);
321
322 /* If we got too many false alarms this time, reduce sensitivity */
323 if (false_alarms > max_false_alarms) {
324
325 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
326 false_alarms, max_false_alarms);
327
328 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
329 data->auto_corr_ofdm =
330 min((u32)ranges->auto_corr_max_ofdm, val);
331
332 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
333 data->auto_corr_ofdm_mrc =
334 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
335
336 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
337 data->auto_corr_ofdm_x1 =
338 min((u32)ranges->auto_corr_max_ofdm_x1, val);
339
340 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
341 data->auto_corr_ofdm_mrc_x1 =
342 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
343 }
344
345 /* Else if we got fewer than desired, increase sensitivity */
346 else if (false_alarms < min_false_alarms) {
347
348 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
349 false_alarms, min_false_alarms);
350
351 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
352 data->auto_corr_ofdm =
353 max((u32)ranges->auto_corr_min_ofdm, val);
354
355 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
356 data->auto_corr_ofdm_mrc =
357 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
358
359 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
360 data->auto_corr_ofdm_x1 =
361 max((u32)ranges->auto_corr_min_ofdm_x1, val);
362
363 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
364 data->auto_corr_ofdm_mrc_x1 =
365 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
366 } else {
367 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
368 min_false_alarms, false_alarms, max_false_alarms);
369 }
370 return 0;
371}
372
373static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
374 struct iwl_sensitivity_data *data,
375 __le16 *tbl)
376{
377 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
378 cpu_to_le16((u16)data->auto_corr_ofdm);
379 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
380 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
381 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
382 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
383 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
384 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
385
386 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
387 cpu_to_le16((u16)data->auto_corr_cck);
388 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
389 cpu_to_le16((u16)data->auto_corr_cck_mrc);
390
391 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
392 cpu_to_le16((u16)data->nrg_th_cck);
393 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
394 cpu_to_le16((u16)data->nrg_th_ofdm);
395
396 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
397 cpu_to_le16(data->barker_corr_th_min);
398 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
399 cpu_to_le16(data->barker_corr_th_min_mrc);
400 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
401 cpu_to_le16(data->nrg_th_cca);
402
403 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
404 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
405 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
406 data->nrg_th_ofdm);
407
408 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
409 data->auto_corr_cck, data->auto_corr_cck_mrc,
410 data->nrg_th_cck);
411}
412
413/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
414static int iwl4965_sensitivity_write(struct iwl_priv *priv)
415{
416 struct iwl_sensitivity_cmd cmd;
417 struct iwl_sensitivity_data *data = NULL;
418 struct iwl_host_cmd cmd_out = {
419 .id = SENSITIVITY_CMD,
420 .len = sizeof(struct iwl_sensitivity_cmd),
421 .flags = CMD_ASYNC,
422 .data = &cmd,
423 };
424
425 data = &(priv->sensitivity_data);
426
427 memset(&cmd, 0, sizeof(cmd));
428
429 iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
430
431 /* Update uCode's "work" table, and copy it to DSP */
432 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
433
434 /* Don't send command to uCode if nothing has changed */
435 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
436 sizeof(u16)*HD_TABLE_SIZE)) {
437 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
438 return 0;
439 }
440
441 /* Copy table for comparison next time */
442 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
443 sizeof(u16)*HD_TABLE_SIZE);
444
445 return iwl_legacy_send_cmd(priv, &cmd_out);
446}
447
448void iwl4965_init_sensitivity(struct iwl_priv *priv)
449{
450 int ret = 0;
451 int i;
452 struct iwl_sensitivity_data *data = NULL;
453 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
454
455 if (priv->disable_sens_cal)
456 return;
457
458 IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
459
460 /* Clear driver's sensitivity algo data */
461 data = &(priv->sensitivity_data);
462
463 if (ranges == NULL)
464 return;
465
466 memset(data, 0, sizeof(struct iwl_sensitivity_data));
467
468 data->num_in_cck_no_fa = 0;
469 data->nrg_curr_state = IWL_FA_TOO_MANY;
470 data->nrg_prev_state = IWL_FA_TOO_MANY;
471 data->nrg_silence_ref = 0;
472 data->nrg_silence_idx = 0;
473 data->nrg_energy_idx = 0;
474
475 for (i = 0; i < 10; i++)
476 data->nrg_value[i] = 0;
477
478 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
479 data->nrg_silence_rssi[i] = 0;
480
481 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
482 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
483 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
484 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
485 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
486 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
487 data->nrg_th_cck = ranges->nrg_th_cck;
488 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
489 data->barker_corr_th_min = ranges->barker_corr_th_min;
490 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
491 data->nrg_th_cca = ranges->nrg_th_cca;
492
493 data->last_bad_plcp_cnt_ofdm = 0;
494 data->last_fa_cnt_ofdm = 0;
495 data->last_bad_plcp_cnt_cck = 0;
496 data->last_fa_cnt_cck = 0;
497
498 ret |= iwl4965_sensitivity_write(priv);
499 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
500}
501
502void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
503{
504 u32 rx_enable_time;
505 u32 fa_cck;
506 u32 fa_ofdm;
507 u32 bad_plcp_cck;
508 u32 bad_plcp_ofdm;
509 u32 norm_fa_ofdm;
510 u32 norm_fa_cck;
511 struct iwl_sensitivity_data *data = NULL;
512 struct statistics_rx_non_phy *rx_info;
513 struct statistics_rx_phy *ofdm, *cck;
514 unsigned long flags;
515 struct statistics_general_data statis;
516
517 if (priv->disable_sens_cal)
518 return;
519
520 data = &(priv->sensitivity_data);
521
522 if (!iwl_legacy_is_any_associated(priv)) {
523 IWL_DEBUG_CALIB(priv, "<< - not associated\n");
524 return;
525 }
526
527 spin_lock_irqsave(&priv->lock, flags);
528
529 rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
530 ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
531 cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
532
533 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
534 IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
535 spin_unlock_irqrestore(&priv->lock, flags);
536 return;
537 }
538
539 /* Extract Statistics: */
540 rx_enable_time = le32_to_cpu(rx_info->channel_load);
541 fa_cck = le32_to_cpu(cck->false_alarm_cnt);
542 fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
543 bad_plcp_cck = le32_to_cpu(cck->plcp_err);
544 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
545
546 statis.beacon_silence_rssi_a =
547 le32_to_cpu(rx_info->beacon_silence_rssi_a);
548 statis.beacon_silence_rssi_b =
549 le32_to_cpu(rx_info->beacon_silence_rssi_b);
550 statis.beacon_silence_rssi_c =
551 le32_to_cpu(rx_info->beacon_silence_rssi_c);
552 statis.beacon_energy_a =
553 le32_to_cpu(rx_info->beacon_energy_a);
554 statis.beacon_energy_b =
555 le32_to_cpu(rx_info->beacon_energy_b);
556 statis.beacon_energy_c =
557 le32_to_cpu(rx_info->beacon_energy_c);
558
559 spin_unlock_irqrestore(&priv->lock, flags);
560
561 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
562
563 if (!rx_enable_time) {
564 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
565 return;
566 }
567
568 /* These statistics increase monotonically, and do not reset
569 * at each beacon. Calculate difference from last value, or just
570 * use the new statistics value if it has reset or wrapped around. */
571 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
572 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
573 else {
574 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
575 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
576 }
577
578 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
579 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
580 else {
581 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
582 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
583 }
584
585 if (data->last_fa_cnt_ofdm > fa_ofdm)
586 data->last_fa_cnt_ofdm = fa_ofdm;
587 else {
588 fa_ofdm -= data->last_fa_cnt_ofdm;
589 data->last_fa_cnt_ofdm += fa_ofdm;
590 }
591
592 if (data->last_fa_cnt_cck > fa_cck)
593 data->last_fa_cnt_cck = fa_cck;
594 else {
595 fa_cck -= data->last_fa_cnt_cck;
596 data->last_fa_cnt_cck += fa_cck;
597 }
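	/*
	 * Worked example of the delta logic above (illustrative numbers): if
	 * the previous cumulative CCK false-alarm count was 100 and the new
	 * report says 130, fa_cck becomes the 30 new events and the stored
	 * count is updated to 130; if the new report says 5 (firmware counter
	 * reset or wrap), the stored count is simply reset and the raw value
	 * 5 is used as this period's count.
	 */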
598
599 /* Total aborted signal locks */
600 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
601 norm_fa_cck = fa_cck + bad_plcp_cck;
602
603 IWL_DEBUG_CALIB(priv,
604 "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
605 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
606
607 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
608 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
609
610 iwl4965_sensitivity_write(priv);
611}
612
613static inline u8 iwl4965_find_first_chain(u8 mask)
614{
615 if (mask & ANT_A)
616 return CHAIN_A;
617 if (mask & ANT_B)
618 return CHAIN_B;
619 return CHAIN_C;
620}
621
622/**
623 * iwl4965_find_disconn_antenna - run the disconnected antenna algorithm
624 * to determine which Rx chains are disconnected.
625 */
626static void
627iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
628 struct iwl_chain_noise_data *data)
629{
630 u32 active_chains = 0;
631 u32 max_average_sig;
632 u16 max_average_sig_antenna_i;
633 u8 num_tx_chains;
634 u8 first_chain;
635 u16 i = 0;
636
637 average_sig[0] = data->chain_signal_a /
638 priv->cfg->base_params->chain_noise_num_beacons;
639 average_sig[1] = data->chain_signal_b /
640 priv->cfg->base_params->chain_noise_num_beacons;
641 average_sig[2] = data->chain_signal_c /
642 priv->cfg->base_params->chain_noise_num_beacons;
643
644 if (average_sig[0] >= average_sig[1]) {
645 max_average_sig = average_sig[0];
646 max_average_sig_antenna_i = 0;
647 active_chains = (1 << max_average_sig_antenna_i);
648 } else {
649 max_average_sig = average_sig[1];
650 max_average_sig_antenna_i = 1;
651 active_chains = (1 << max_average_sig_antenna_i);
652 }
653
654 if (average_sig[2] >= max_average_sig) {
655 max_average_sig = average_sig[2];
656 max_average_sig_antenna_i = 2;
657 active_chains = (1 << max_average_sig_antenna_i);
658 }
659
660 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
661 average_sig[0], average_sig[1], average_sig[2]);
662 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
663 max_average_sig, max_average_sig_antenna_i);
664
665 /* Compare signal strengths for all 3 receivers. */
666 for (i = 0; i < NUM_RX_CHAINS; i++) {
667 if (i != max_average_sig_antenna_i) {
668 s32 rssi_delta = (max_average_sig - average_sig[i]);
669
670 /* If signal is very weak, compared with
671 * strongest, mark it as disconnected. */
672 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
673 data->disconn_array[i] = 1;
674 else
675 active_chains |= (1 << i);
676 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
677 "disconn_array[i] = %d\n",
678 i, rssi_delta, data->disconn_array[i]);
679 }
680 }
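	/*
	 * Illustrative example with made-up averages {50, 48, 10}: chain A is
	 * the reference; chain B stays active because its delta of 2 is small,
	 * while chain C is marked disconnected if its delta of 40 exceeds
	 * MAXIMUM_ALLOWED_PATHLOSS.
	 */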
681
682 /*
683 * The above algorithm sometimes fails when the ucode
684 * reports 0 for all chains. It's not clear why that
685 * happens to start with, but it is then causing trouble
686 * because this can make us enable more chains than the
687 * hardware really has.
688 *
689 * To be safe, simply mask out any chains that we know
690 * are not on the device.
691 */
692 active_chains &= priv->hw_params.valid_rx_ant;
693
694 num_tx_chains = 0;
695 for (i = 0; i < NUM_RX_CHAINS; i++) {
696 /* loops on all the bits of
697 * priv->hw_setting.valid_tx_ant */
698 u8 ant_msk = (1 << i);
699 if (!(priv->hw_params.valid_tx_ant & ant_msk))
700 continue;
701
702 num_tx_chains++;
703 if (data->disconn_array[i] == 0)
704 /* there is a Tx antenna connected */
705 break;
706 if (num_tx_chains == priv->hw_params.tx_chains_num &&
707 data->disconn_array[i]) {
708 /*
709 * If all chains are disconnected
710 * connect the first valid tx chain
711 */
712 first_chain =
713 iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
714 data->disconn_array[first_chain] = 0;
715 active_chains |= BIT(first_chain);
716 IWL_DEBUG_CALIB(priv,
717 "All Tx chains are disconnected W/A - declare %d as connected\n",
718 first_chain);
719 break;
720 }
721 }
722
723 if (active_chains != priv->hw_params.valid_rx_ant &&
724 active_chains != priv->chain_noise_data.active_chains)
725 IWL_DEBUG_CALIB(priv,
726 "Detected that not all antennas are connected! "
727 "Connected: %#x, valid: %#x.\n",
728 active_chains, priv->hw_params.valid_rx_ant);
729
730 /* Save for use within RXON, TX, SCAN commands, etc. */
731 data->active_chains = active_chains;
732 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
733 active_chains);
734}
735
736static void iwl4965_gain_computation(struct iwl_priv *priv,
737 u32 *average_noise,
738 u16 min_average_noise_antenna_i,
739 u32 min_average_noise,
740 u8 default_chain)
741{
742 int i, ret;
743 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
744
745 data->delta_gain_code[min_average_noise_antenna_i] = 0;
746
747 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
748 s32 delta_g = 0;
749
750 if (!(data->disconn_array[i]) &&
751 (data->delta_gain_code[i] ==
752 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
753 delta_g = average_noise[i] - min_average_noise;
754 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
755 data->delta_gain_code[i] =
756 min(data->delta_gain_code[i],
757 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
758
759 data->delta_gain_code[i] =
760 (data->delta_gain_code[i] | (1 << 2));
761 } else {
762 data->delta_gain_code[i] = 0;
763 }
764 }
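	/*
	 * Arithmetic of the mapping above (illustrative): a chain whose
	 * average noise is 15 units above the quietest chain gets
	 * delta_g = 15 and a gain code of (15 * 10) / 15 = 10, which is then
	 * clamped to CHAIN_NOISE_MAX_DELTA_GAIN_CODE and OR'ed with BIT(2).
	 */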
765 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
766 data->delta_gain_code[0],
767 data->delta_gain_code[1],
768 data->delta_gain_code[2]);
769
770 /* Differential gain gets sent to uCode only once */
771 if (!data->radio_write) {
772 struct iwl_calib_diff_gain_cmd cmd;
773 data->radio_write = 1;
774
775 memset(&cmd, 0, sizeof(cmd));
776 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
777 cmd.diff_gain_a = data->delta_gain_code[0];
778 cmd.diff_gain_b = data->delta_gain_code[1];
779 cmd.diff_gain_c = data->delta_gain_code[2];
780 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
781 sizeof(cmd), &cmd);
782 if (ret)
783 IWL_DEBUG_CALIB(priv, "fail sending cmd "
784 "REPLY_PHY_CALIBRATION_CMD\n");
785
786 /* TODO we might want recalculate
787 * rx_chain in rxon cmd */
788
789 /* Mark so we run this algo only once! */
790 data->state = IWL_CHAIN_NOISE_CALIBRATED;
791 }
792}
793
794
795
796/*
797 * Accumulate 16 beacons of signal and noise statistics for each of
798 * 3 receivers/antennas/rx-chains, then figure out:
799 * 1) Which antennas are connected.
800 * 2) Differential rx gain settings to balance the 3 receivers.
801 */
802void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
803{
804 struct iwl_chain_noise_data *data = NULL;
805
806 u32 chain_noise_a;
807 u32 chain_noise_b;
808 u32 chain_noise_c;
809 u32 chain_sig_a;
810 u32 chain_sig_b;
811 u32 chain_sig_c;
812 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
813 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
814 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
815 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
816 u16 i = 0;
817 u16 rxon_chnum = INITIALIZATION_VALUE;
818 u16 stat_chnum = INITIALIZATION_VALUE;
819 u8 rxon_band24;
820 u8 stat_band24;
821 unsigned long flags;
822 struct statistics_rx_non_phy *rx_info;
823
824 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
825
826 if (priv->disable_chain_noise_cal)
827 return;
828
829 data = &(priv->chain_noise_data);
830
831 /*
832 * Accumulate just the first "chain_noise_num_beacons" after
833 * the first association, then we're done forever.
834 */
835 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
836 if (data->state == IWL_CHAIN_NOISE_ALIVE)
837 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
838 return;
839 }
840
841 spin_lock_irqsave(&priv->lock, flags);
842
843 rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
844 rx.general);
845
846 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
847 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
848 spin_unlock_irqrestore(&priv->lock, flags);
849 return;
850 }
851
852 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
853 rxon_chnum = le16_to_cpu(ctx->staging.channel);
854
855 stat_band24 = !!(((struct iwl_notif_statistics *)
856 stat_resp)->flag &
857 STATISTICS_REPLY_FLG_BAND_24G_MSK);
858 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
859 stat_resp)->flag) >> 16;
860
861 /* Make sure we accumulate data for just the associated channel
862 * (even if scanning). */
863 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
864 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
865 rxon_chnum, rxon_band24);
866 spin_unlock_irqrestore(&priv->lock, flags);
867 return;
868 }
869
870 /*
871 * Accumulate beacon statistics values across
872 * "chain_noise_num_beacons"
873 */
874 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
875 IN_BAND_FILTER;
876 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
877 IN_BAND_FILTER;
878 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
879 IN_BAND_FILTER;
880
881 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
882 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
883 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
884
885 spin_unlock_irqrestore(&priv->lock, flags);
886
887 data->beacon_count++;
888
889 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
890 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
891 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
892
893 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
894 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
895 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
896
897 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
898 rxon_chnum, rxon_band24, data->beacon_count);
899 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
900 chain_sig_a, chain_sig_b, chain_sig_c);
901 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
902 chain_noise_a, chain_noise_b, chain_noise_c);
903
904 /* Once "chain_noise_num_beacons" beacons have been accumulated, determine:
905 * 1) Disconnected antennas (using signal strengths)
906 * 2) Differential gain (using silence noise) to balance receivers */
907 if (data->beacon_count !=
908 priv->cfg->base_params->chain_noise_num_beacons)
909 return;
910
911 /* Analyze signal for disconnected antenna */
912 iwl4965_find_disconn_antenna(priv, average_sig, data);
913
914 /* Analyze noise for rx balance */
915 average_noise[0] = data->chain_noise_a /
916 priv->cfg->base_params->chain_noise_num_beacons;
917 average_noise[1] = data->chain_noise_b /
918 priv->cfg->base_params->chain_noise_num_beacons;
919 average_noise[2] = data->chain_noise_c /
920 priv->cfg->base_params->chain_noise_num_beacons;
921
922 for (i = 0; i < NUM_RX_CHAINS; i++) {
923 if (!(data->disconn_array[i]) &&
924 (average_noise[i] <= min_average_noise)) {
925 /* This means that chain i is active and has
926 * lower noise values so far: */
927 min_average_noise = average_noise[i];
928 min_average_noise_antenna_i = i;
929 }
930 }
931
932 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
933 average_noise[0], average_noise[1],
934 average_noise[2]);
935
936 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
937 min_average_noise, min_average_noise_antenna_i);
938
939 iwl4965_gain_computation(priv, average_noise,
940 min_average_noise_antenna_i, min_average_noise,
941 iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
942
943 /* Some power changes may have been made during the calibration.
944 * Update and commit the RXON
945 */
946 if (priv->cfg->ops->lib->update_chain_flags)
947 priv->cfg->ops->lib->update_chain_flags(priv);
948
949 data->state = IWL_CHAIN_NOISE_DONE;
950 iwl_legacy_power_update_mode(priv, false);
951}
952
953void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
954{
955 int i;
956 memset(&(priv->sensitivity_data), 0,
957 sizeof(struct iwl_sensitivity_data));
958 memset(&(priv->chain_noise_data), 0,
959 sizeof(struct iwl_chain_noise_data));
960 for (i = 0; i < NUM_RX_CHAINS; i++)
961 priv->chain_noise_data.delta_gain_code[i] =
962 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
963
964 /* Ask for statistics now; the uCode will send notifications
965 * periodically after association */
966 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
967}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
new file mode 100644
index 00000000000..f46c80e6e00
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
@@ -0,0 +1,75 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_4965_calib_h__
63#define __iwl_4965_calib_h__
64
65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-commands.h"
68
69void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
70void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
71void iwl4965_init_sensitivity(struct iwl_priv *priv);
72void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
73void iwl4965_calib_free_results(struct iwl_priv *priv);
74
75#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
new file mode 100644
index 00000000000..1c93665766e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
@@ -0,0 +1,774 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "iwl-4965.h"
29#include "iwl-4965-debugfs.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
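/*
 * Format note (derived from the strings above): fmt_table prints the
 * statistic name left-justified in a 30-character field followed by four
 * 10-character unsigned columns, which correspond to the current,
 * cumulative, delta and max columns announced by fmt_header.
 */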
35
36static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
37{
38 int p = 0;
39 u32 flag;
40
41 flag = le32_to_cpu(priv->_4965.statistics.flag);
42
43 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
44 if (flag & UCODE_STATISTICS_CLEAR_MSK)
45 p += scnprintf(buf + p, bufsz - p,
46 "\tStatistics have been cleared\n");
47 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
48 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
49 ? "2.4 GHz" : "5.2 GHz");
50 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
51 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
52 ? "enabled" : "disabled");
53
54 return p;
55}
56
57ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
58 size_t count, loff_t *ppos)
59{
60 struct iwl_priv *priv = file->private_data;
61 int pos = 0;
62 char *buf;
63 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
64 sizeof(struct statistics_rx_non_phy) * 40 +
65 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
66 ssize_t ret;
67 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
68 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
69 struct statistics_rx_non_phy *general, *accum_general;
70 struct statistics_rx_non_phy *delta_general, *max_general;
71 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
72
73 if (!iwl_legacy_is_alive(priv))
74 return -EAGAIN;
75
76 buf = kzalloc(bufsz, GFP_KERNEL);
77 if (!buf) {
78 IWL_ERR(priv, "Cannot allocate buffer\n");
79 return -ENOMEM;
80 }
81
82 /*
83 * The statistics information displayed here is based on
84 * the last statistics notification from the uCode and
85 * might not reflect the current uCode activity.
86 */
87 ofdm = &priv->_4965.statistics.rx.ofdm;
88 cck = &priv->_4965.statistics.rx.cck;
89 general = &priv->_4965.statistics.rx.general;
90 ht = &priv->_4965.statistics.rx.ofdm_ht;
91 accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
92 accum_cck = &priv->_4965.accum_statistics.rx.cck;
93 accum_general = &priv->_4965.accum_statistics.rx.general;
94 accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
95 delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
96 delta_cck = &priv->_4965.delta_statistics.rx.cck;
97 delta_general = &priv->_4965.delta_statistics.rx.general;
98 delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
99 max_ofdm = &priv->_4965.max_delta.rx.ofdm;
100 max_cck = &priv->_4965.max_delta.rx.cck;
101 max_general = &priv->_4965.max_delta.rx.general;
102 max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
103
104 pos += iwl4965_statistics_flag(priv, buf, bufsz);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 fmt_header, "Statistics_Rx - OFDM:");
107 pos += scnprintf(buf + pos, bufsz - pos,
108 fmt_table, "ina_cnt:",
109 le32_to_cpu(ofdm->ina_cnt),
110 accum_ofdm->ina_cnt,
111 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 fmt_table, "fina_cnt:",
114 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
115 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
116 pos += scnprintf(buf + pos, bufsz - pos,
117 fmt_table, "plcp_err:",
118 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
119 delta_ofdm->plcp_err, max_ofdm->plcp_err);
120 pos += scnprintf(buf + pos, bufsz - pos,
121 fmt_table, "crc32_err:",
122 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
123 delta_ofdm->crc32_err, max_ofdm->crc32_err);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 fmt_table, "overrun_err:",
126 le32_to_cpu(ofdm->overrun_err),
127 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
128 max_ofdm->overrun_err);
129 pos += scnprintf(buf + pos, bufsz - pos,
130 fmt_table, "early_overrun_err:",
131 le32_to_cpu(ofdm->early_overrun_err),
132 accum_ofdm->early_overrun_err,
133 delta_ofdm->early_overrun_err,
134 max_ofdm->early_overrun_err);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 fmt_table, "crc32_good:",
137 le32_to_cpu(ofdm->crc32_good),
138 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
139 max_ofdm->crc32_good);
140 pos += scnprintf(buf + pos, bufsz - pos,
141 fmt_table, "false_alarm_cnt:",
142 le32_to_cpu(ofdm->false_alarm_cnt),
143 accum_ofdm->false_alarm_cnt,
144 delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos += scnprintf(buf + pos, bufsz - pos,
153 fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout),
155 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
156 max_ofdm->sfd_timeout);
157 pos += scnprintf(buf + pos, bufsz - pos,
158 fmt_table, "fina_timeout:",
159 le32_to_cpu(ofdm->fina_timeout),
160 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 fmt_table, "unresponded_rts:",
164 le32_to_cpu(ofdm->unresponded_rts),
165 accum_ofdm->unresponded_rts,
166 delta_ofdm->unresponded_rts,
167 max_ofdm->unresponded_rts);
168 pos += scnprintf(buf + pos, bufsz - pos,
169 fmt_table, "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos += scnprintf(buf + pos, bufsz - pos,
175 fmt_table, "sent_ack_cnt:",
176 le32_to_cpu(ofdm->sent_ack_cnt),
177 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
178 max_ofdm->sent_ack_cnt);
179 pos += scnprintf(buf + pos, bufsz - pos,
180 fmt_table, "sent_cts_cnt:",
181 le32_to_cpu(ofdm->sent_cts_cnt),
182 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
183 max_ofdm->sent_cts_cnt);
184 pos += scnprintf(buf + pos, bufsz - pos,
185 fmt_table, "sent_ba_rsp_cnt:",
186 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
187 accum_ofdm->sent_ba_rsp_cnt,
188 delta_ofdm->sent_ba_rsp_cnt,
189 max_ofdm->sent_ba_rsp_cnt);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 fmt_table, "dsp_self_kill:",
192 le32_to_cpu(ofdm->dsp_self_kill),
193 accum_ofdm->dsp_self_kill,
194 delta_ofdm->dsp_self_kill,
195 max_ofdm->dsp_self_kill);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 fmt_table, "mh_format_err:",
198 le32_to_cpu(ofdm->mh_format_err),
199 accum_ofdm->mh_format_err,
200 delta_ofdm->mh_format_err,
201 max_ofdm->mh_format_err);
202 pos += scnprintf(buf + pos, bufsz - pos,
203 fmt_table, "re_acq_main_rssi_sum:",
204 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
205 accum_ofdm->re_acq_main_rssi_sum,
206 delta_ofdm->re_acq_main_rssi_sum,
207 max_ofdm->re_acq_main_rssi_sum);
208
209 pos += scnprintf(buf + pos, bufsz - pos,
210 fmt_header, "Statistics_Rx - CCK:");
211 pos += scnprintf(buf + pos, bufsz - pos,
212 fmt_table, "ina_cnt:",
213 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
214 delta_cck->ina_cnt, max_cck->ina_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 fmt_table, "fina_cnt:",
217 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
218 delta_cck->fina_cnt, max_cck->fina_cnt);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 fmt_table, "plcp_err:",
221 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
222 delta_cck->plcp_err, max_cck->plcp_err);
223 pos += scnprintf(buf + pos, bufsz - pos,
224 fmt_table, "crc32_err:",
225 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
226 delta_cck->crc32_err, max_cck->crc32_err);
227 pos += scnprintf(buf + pos, bufsz - pos,
228 fmt_table, "overrun_err:",
229 le32_to_cpu(cck->overrun_err),
230 accum_cck->overrun_err, delta_cck->overrun_err,
231 max_cck->overrun_err);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 fmt_table, "early_overrun_err:",
234 le32_to_cpu(cck->early_overrun_err),
235 accum_cck->early_overrun_err,
236 delta_cck->early_overrun_err,
237 max_cck->early_overrun_err);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 fmt_table, "crc32_good:",
240 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
241 delta_cck->crc32_good, max_cck->crc32_good);
242 pos += scnprintf(buf + pos, bufsz - pos,
243 fmt_table, "false_alarm_cnt:",
244 le32_to_cpu(cck->false_alarm_cnt),
245 accum_cck->false_alarm_cnt,
246 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 fmt_table, "fina_sync_err_cnt:",
249 le32_to_cpu(cck->fina_sync_err_cnt),
250 accum_cck->fina_sync_err_cnt,
251 delta_cck->fina_sync_err_cnt,
252 max_cck->fina_sync_err_cnt);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 fmt_table, "sfd_timeout:",
255 le32_to_cpu(cck->sfd_timeout),
256 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
257 max_cck->sfd_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 fmt_table, "fina_timeout:",
260 le32_to_cpu(cck->fina_timeout),
261 accum_cck->fina_timeout, delta_cck->fina_timeout,
262 max_cck->fina_timeout);
263 pos += scnprintf(buf + pos, bufsz - pos,
264 fmt_table, "unresponded_rts:",
265 le32_to_cpu(cck->unresponded_rts),
266 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
267 max_cck->unresponded_rts);
268 pos += scnprintf(buf + pos, bufsz - pos,
269 fmt_table, "rxe_frame_lmt_ovrun:",
270 le32_to_cpu(cck->rxe_frame_limit_overrun),
271 accum_cck->rxe_frame_limit_overrun,
272 delta_cck->rxe_frame_limit_overrun,
273 max_cck->rxe_frame_limit_overrun);
274 pos += scnprintf(buf + pos, bufsz - pos,
275 fmt_table, "sent_ack_cnt:",
276 le32_to_cpu(cck->sent_ack_cnt),
277 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
278 max_cck->sent_ack_cnt);
279 pos += scnprintf(buf + pos, bufsz - pos,
280 fmt_table, "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
283 max_cck->sent_cts_cnt);
284 pos += scnprintf(buf + pos, bufsz - pos,
285 fmt_table, "sent_ba_rsp_cnt:",
286 le32_to_cpu(cck->sent_ba_rsp_cnt),
287 accum_cck->sent_ba_rsp_cnt,
288 delta_cck->sent_ba_rsp_cnt,
289 max_cck->sent_ba_rsp_cnt);
290 pos += scnprintf(buf + pos, bufsz - pos,
291 fmt_table, "dsp_self_kill:",
292 le32_to_cpu(cck->dsp_self_kill),
293 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
294 max_cck->dsp_self_kill);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 fmt_table, "mh_format_err:",
297 le32_to_cpu(cck->mh_format_err),
298 accum_cck->mh_format_err, delta_cck->mh_format_err,
299 max_cck->mh_format_err);
300 pos += scnprintf(buf + pos, bufsz - pos,
301 fmt_table, "re_acq_main_rssi_sum:",
302 le32_to_cpu(cck->re_acq_main_rssi_sum),
303 accum_cck->re_acq_main_rssi_sum,
304 delta_cck->re_acq_main_rssi_sum,
305 max_cck->re_acq_main_rssi_sum);
306
307 pos += scnprintf(buf + pos, bufsz - pos,
308 fmt_header, "Statistics_Rx - GENERAL:");
309 pos += scnprintf(buf + pos, bufsz - pos,
310 fmt_table, "bogus_cts:",
311 le32_to_cpu(general->bogus_cts),
312 accum_general->bogus_cts, delta_general->bogus_cts,
313 max_general->bogus_cts);
314 pos += scnprintf(buf + pos, bufsz - pos,
315 fmt_table, "bogus_ack:",
316 le32_to_cpu(general->bogus_ack),
317 accum_general->bogus_ack, delta_general->bogus_ack,
318 max_general->bogus_ack);
319 pos += scnprintf(buf + pos, bufsz - pos,
320 fmt_table, "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 fmt_table, "filtered_frames:",
327 le32_to_cpu(general->filtered_frames),
328 accum_general->filtered_frames,
329 delta_general->filtered_frames,
330 max_general->filtered_frames);
331 pos += scnprintf(buf + pos, bufsz - pos,
332 fmt_table, "non_channel_beacons:",
333 le32_to_cpu(general->non_channel_beacons),
334 accum_general->non_channel_beacons,
335 delta_general->non_channel_beacons,
336 max_general->non_channel_beacons);
337 pos += scnprintf(buf + pos, bufsz - pos,
338 fmt_table, "channel_beacons:",
339 le32_to_cpu(general->channel_beacons),
340 accum_general->channel_beacons,
341 delta_general->channel_beacons,
342 max_general->channel_beacons);
343 pos += scnprintf(buf + pos, bufsz - pos,
344 fmt_table, "num_missed_bcon:",
345 le32_to_cpu(general->num_missed_bcon),
346 accum_general->num_missed_bcon,
347 delta_general->num_missed_bcon,
348 max_general->num_missed_bcon);
349 pos += scnprintf(buf + pos, bufsz - pos,
350 fmt_table, "adc_rx_saturation_time:",
351 le32_to_cpu(general->adc_rx_saturation_time),
352 accum_general->adc_rx_saturation_time,
353 delta_general->adc_rx_saturation_time,
354 max_general->adc_rx_saturation_time);
355 pos += scnprintf(buf + pos, bufsz - pos,
356 fmt_table, "ina_detect_search_tm:",
357 le32_to_cpu(general->ina_detection_search_time),
358 accum_general->ina_detection_search_time,
359 delta_general->ina_detection_search_time,
360 max_general->ina_detection_search_time);
361 pos += scnprintf(buf + pos, bufsz - pos,
362 fmt_table, "beacon_silence_rssi_a:",
363 le32_to_cpu(general->beacon_silence_rssi_a),
364 accum_general->beacon_silence_rssi_a,
365 delta_general->beacon_silence_rssi_a,
366 max_general->beacon_silence_rssi_a);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 fmt_table, "beacon_silence_rssi_b:",
369 le32_to_cpu(general->beacon_silence_rssi_b),
370 accum_general->beacon_silence_rssi_b,
371 delta_general->beacon_silence_rssi_b,
372 max_general->beacon_silence_rssi_b);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 fmt_table, "beacon_silence_rssi_c:",
375 le32_to_cpu(general->beacon_silence_rssi_c),
376 accum_general->beacon_silence_rssi_c,
377 delta_general->beacon_silence_rssi_c,
378 max_general->beacon_silence_rssi_c);
379 pos += scnprintf(buf + pos, bufsz - pos,
380 fmt_table, "interference_data_flag:",
381 le32_to_cpu(general->interference_data_flag),
382 accum_general->interference_data_flag,
383 delta_general->interference_data_flag,
384 max_general->interference_data_flag);
385 pos += scnprintf(buf + pos, bufsz - pos,
386 fmt_table, "channel_load:",
387 le32_to_cpu(general->channel_load),
388 accum_general->channel_load,
389 delta_general->channel_load,
390 max_general->channel_load);
391 pos += scnprintf(buf + pos, bufsz - pos,
392 fmt_table, "dsp_false_alarms:",
393 le32_to_cpu(general->dsp_false_alarms),
394 accum_general->dsp_false_alarms,
395 delta_general->dsp_false_alarms,
396 max_general->dsp_false_alarms);
397 pos += scnprintf(buf + pos, bufsz - pos,
398 fmt_table, "beacon_rssi_a:",
399 le32_to_cpu(general->beacon_rssi_a),
400 accum_general->beacon_rssi_a,
401 delta_general->beacon_rssi_a,
402 max_general->beacon_rssi_a);
403 pos += scnprintf(buf + pos, bufsz - pos,
404 fmt_table, "beacon_rssi_b:",
405 le32_to_cpu(general->beacon_rssi_b),
406 accum_general->beacon_rssi_b,
407 delta_general->beacon_rssi_b,
408 max_general->beacon_rssi_b);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 fmt_table, "beacon_rssi_c:",
411 le32_to_cpu(general->beacon_rssi_c),
412 accum_general->beacon_rssi_c,
413 delta_general->beacon_rssi_c,
414 max_general->beacon_rssi_c);
415 pos += scnprintf(buf + pos, bufsz - pos,
416 fmt_table, "beacon_energy_a:",
417 le32_to_cpu(general->beacon_energy_a),
418 accum_general->beacon_energy_a,
419 delta_general->beacon_energy_a,
420 max_general->beacon_energy_a);
421 pos += scnprintf(buf + pos, bufsz - pos,
422 fmt_table, "beacon_energy_b:",
423 le32_to_cpu(general->beacon_energy_b),
424 accum_general->beacon_energy_b,
425 delta_general->beacon_energy_b,
426 max_general->beacon_energy_b);
427 pos += scnprintf(buf + pos, bufsz - pos,
428 fmt_table, "beacon_energy_c:",
429 le32_to_cpu(general->beacon_energy_c),
430 accum_general->beacon_energy_c,
431 delta_general->beacon_energy_c,
432 max_general->beacon_energy_c);
433
434 pos += scnprintf(buf + pos, bufsz - pos,
435 fmt_header, "Statistics_Rx - OFDM_HT:");
436 pos += scnprintf(buf + pos, bufsz - pos,
437 fmt_table, "plcp_err:",
438 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
439 delta_ht->plcp_err, max_ht->plcp_err);
440 pos += scnprintf(buf + pos, bufsz - pos,
441 fmt_table, "overrun_err:",
442 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
443 delta_ht->overrun_err, max_ht->overrun_err);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 fmt_table, "early_overrun_err:",
446 le32_to_cpu(ht->early_overrun_err),
447 accum_ht->early_overrun_err,
448 delta_ht->early_overrun_err,
449 max_ht->early_overrun_err);
450 pos += scnprintf(buf + pos, bufsz - pos,
451 fmt_table, "crc32_good:",
452 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
453 delta_ht->crc32_good, max_ht->crc32_good);
454 pos += scnprintf(buf + pos, bufsz - pos,
455 fmt_table, "crc32_err:",
456 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
457 delta_ht->crc32_err, max_ht->crc32_err);
458 pos += scnprintf(buf + pos, bufsz - pos,
459 fmt_table, "mh_format_err:",
460 le32_to_cpu(ht->mh_format_err),
461 accum_ht->mh_format_err,
462 delta_ht->mh_format_err, max_ht->mh_format_err);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 fmt_table, "agg_crc32_good:",
465 le32_to_cpu(ht->agg_crc32_good),
466 accum_ht->agg_crc32_good,
467 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
468 pos += scnprintf(buf + pos, bufsz - pos,
469 fmt_table, "agg_mpdu_cnt:",
470 le32_to_cpu(ht->agg_mpdu_cnt),
471 accum_ht->agg_mpdu_cnt,
472 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 fmt_table, "agg_cnt:",
475 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
476 delta_ht->agg_cnt, max_ht->agg_cnt);
477 pos += scnprintf(buf + pos, bufsz - pos,
478 fmt_table, "unsupport_mcs:",
479 le32_to_cpu(ht->unsupport_mcs),
480 accum_ht->unsupport_mcs,
481 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
482
483 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
484 kfree(buf);
485 return ret;
486}
487
488ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
489 char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct iwl_priv *priv = file->private_data;
493 int pos = 0;
494 char *buf;
495 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
496 ssize_t ret;
497 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
498
499 if (!iwl_legacy_is_alive(priv))
500 return -EAGAIN;
501
502 buf = kzalloc(bufsz, GFP_KERNEL);
503 if (!buf) {
504		IWL_ERR(priv, "Cannot allocate buffer\n");
505 return -ENOMEM;
506 }
507
508	/* the statistics information displayed here is based on
509	 * the last statistics notification from uCode and
510	 * might not reflect the current uCode activity
511	 */
512 tx = &priv->_4965.statistics.tx;
513 accum_tx = &priv->_4965.accum_statistics.tx;
514 delta_tx = &priv->_4965.delta_statistics.tx;
515 max_tx = &priv->_4965.max_delta.tx;
516
517 pos += iwl4965_statistics_flag(priv, buf, bufsz);
518 pos += scnprintf(buf + pos, bufsz - pos,
519 fmt_header, "Statistics_Tx:");
520 pos += scnprintf(buf + pos, bufsz - pos,
521 fmt_table, "preamble:",
522 le32_to_cpu(tx->preamble_cnt),
523 accum_tx->preamble_cnt,
524 delta_tx->preamble_cnt, max_tx->preamble_cnt);
525 pos += scnprintf(buf + pos, bufsz - pos,
526 fmt_table, "rx_detected_cnt:",
527 le32_to_cpu(tx->rx_detected_cnt),
528 accum_tx->rx_detected_cnt,
529 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
530 pos += scnprintf(buf + pos, bufsz - pos,
531 fmt_table, "bt_prio_defer_cnt:",
532 le32_to_cpu(tx->bt_prio_defer_cnt),
533 accum_tx->bt_prio_defer_cnt,
534 delta_tx->bt_prio_defer_cnt,
535 max_tx->bt_prio_defer_cnt);
536 pos += scnprintf(buf + pos, bufsz - pos,
537 fmt_table, "bt_prio_kill_cnt:",
538 le32_to_cpu(tx->bt_prio_kill_cnt),
539 accum_tx->bt_prio_kill_cnt,
540 delta_tx->bt_prio_kill_cnt,
541 max_tx->bt_prio_kill_cnt);
542 pos += scnprintf(buf + pos, bufsz - pos,
543 fmt_table, "few_bytes_cnt:",
544 le32_to_cpu(tx->few_bytes_cnt),
545 accum_tx->few_bytes_cnt,
546 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
547 pos += scnprintf(buf + pos, bufsz - pos,
548 fmt_table, "cts_timeout:",
549 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
550 delta_tx->cts_timeout, max_tx->cts_timeout);
551 pos += scnprintf(buf + pos, bufsz - pos,
552 fmt_table, "ack_timeout:",
553 le32_to_cpu(tx->ack_timeout),
554 accum_tx->ack_timeout,
555 delta_tx->ack_timeout, max_tx->ack_timeout);
556 pos += scnprintf(buf + pos, bufsz - pos,
557 fmt_table, "expected_ack_cnt:",
558 le32_to_cpu(tx->expected_ack_cnt),
559 accum_tx->expected_ack_cnt,
560 delta_tx->expected_ack_cnt,
561 max_tx->expected_ack_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 fmt_table, "actual_ack_cnt:",
564 le32_to_cpu(tx->actual_ack_cnt),
565 accum_tx->actual_ack_cnt,
566 delta_tx->actual_ack_cnt,
567 max_tx->actual_ack_cnt);
568 pos += scnprintf(buf + pos, bufsz - pos,
569 fmt_table, "dump_msdu_cnt:",
570 le32_to_cpu(tx->dump_msdu_cnt),
571 accum_tx->dump_msdu_cnt,
572 delta_tx->dump_msdu_cnt,
573 max_tx->dump_msdu_cnt);
574 pos += scnprintf(buf + pos, bufsz - pos,
575 fmt_table, "abort_nxt_frame_mismatch:",
576 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
577 accum_tx->burst_abort_next_frame_mismatch_cnt,
578 delta_tx->burst_abort_next_frame_mismatch_cnt,
579 max_tx->burst_abort_next_frame_mismatch_cnt);
580 pos += scnprintf(buf + pos, bufsz - pos,
581 fmt_table, "abort_missing_nxt_frame:",
582 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
583 accum_tx->burst_abort_missing_next_frame_cnt,
584 delta_tx->burst_abort_missing_next_frame_cnt,
585 max_tx->burst_abort_missing_next_frame_cnt);
586 pos += scnprintf(buf + pos, bufsz - pos,
587 fmt_table, "cts_timeout_collision:",
588 le32_to_cpu(tx->cts_timeout_collision),
589 accum_tx->cts_timeout_collision,
590 delta_tx->cts_timeout_collision,
591 max_tx->cts_timeout_collision);
592 pos += scnprintf(buf + pos, bufsz - pos,
593 fmt_table, "ack_ba_timeout_collision:",
594 le32_to_cpu(tx->ack_or_ba_timeout_collision),
595 accum_tx->ack_or_ba_timeout_collision,
596 delta_tx->ack_or_ba_timeout_collision,
597 max_tx->ack_or_ba_timeout_collision);
598 pos += scnprintf(buf + pos, bufsz - pos,
599 fmt_table, "agg ba_timeout:",
600 le32_to_cpu(tx->agg.ba_timeout),
601 accum_tx->agg.ba_timeout,
602 delta_tx->agg.ba_timeout,
603 max_tx->agg.ba_timeout);
604 pos += scnprintf(buf + pos, bufsz - pos,
605 fmt_table, "agg ba_resched_frames:",
606 le32_to_cpu(tx->agg.ba_reschedule_frames),
607 accum_tx->agg.ba_reschedule_frames,
608 delta_tx->agg.ba_reschedule_frames,
609 max_tx->agg.ba_reschedule_frames);
610 pos += scnprintf(buf + pos, bufsz - pos,
611 fmt_table, "agg scd_query_agg_frame:",
612 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
613 accum_tx->agg.scd_query_agg_frame_cnt,
614 delta_tx->agg.scd_query_agg_frame_cnt,
615 max_tx->agg.scd_query_agg_frame_cnt);
616 pos += scnprintf(buf + pos, bufsz - pos,
617 fmt_table, "agg scd_query_no_agg:",
618 le32_to_cpu(tx->agg.scd_query_no_agg),
619 accum_tx->agg.scd_query_no_agg,
620 delta_tx->agg.scd_query_no_agg,
621 max_tx->agg.scd_query_no_agg);
622 pos += scnprintf(buf + pos, bufsz - pos,
623 fmt_table, "agg scd_query_agg:",
624 le32_to_cpu(tx->agg.scd_query_agg),
625 accum_tx->agg.scd_query_agg,
626 delta_tx->agg.scd_query_agg,
627 max_tx->agg.scd_query_agg);
628 pos += scnprintf(buf + pos, bufsz - pos,
629 fmt_table, "agg scd_query_mismatch:",
630 le32_to_cpu(tx->agg.scd_query_mismatch),
631 accum_tx->agg.scd_query_mismatch,
632 delta_tx->agg.scd_query_mismatch,
633 max_tx->agg.scd_query_mismatch);
634 pos += scnprintf(buf + pos, bufsz - pos,
635 fmt_table, "agg frame_not_ready:",
636 le32_to_cpu(tx->agg.frame_not_ready),
637 accum_tx->agg.frame_not_ready,
638 delta_tx->agg.frame_not_ready,
639 max_tx->agg.frame_not_ready);
640 pos += scnprintf(buf + pos, bufsz - pos,
641 fmt_table, "agg underrun:",
642 le32_to_cpu(tx->agg.underrun),
643 accum_tx->agg.underrun,
644 delta_tx->agg.underrun, max_tx->agg.underrun);
645 pos += scnprintf(buf + pos, bufsz - pos,
646 fmt_table, "agg bt_prio_kill:",
647 le32_to_cpu(tx->agg.bt_prio_kill),
648 accum_tx->agg.bt_prio_kill,
649 delta_tx->agg.bt_prio_kill,
650 max_tx->agg.bt_prio_kill);
651 pos += scnprintf(buf + pos, bufsz - pos,
652 fmt_table, "agg rx_ba_rsp_cnt:",
653 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
654 accum_tx->agg.rx_ba_rsp_cnt,
655 delta_tx->agg.rx_ba_rsp_cnt,
656 max_tx->agg.rx_ba_rsp_cnt);
657
658 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
659 kfree(buf);
660 return ret;
661}
662
663ssize_t
664iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
665 size_t count, loff_t *ppos)
666{
667 struct iwl_priv *priv = file->private_data;
668 int pos = 0;
669 char *buf;
670 int bufsz = sizeof(struct statistics_general) * 10 + 300;
671 ssize_t ret;
672 struct statistics_general_common *general, *accum_general;
673 struct statistics_general_common *delta_general, *max_general;
674 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
675 struct statistics_div *div, *accum_div, *delta_div, *max_div;
676
677 if (!iwl_legacy_is_alive(priv))
678 return -EAGAIN;
679
680 buf = kzalloc(bufsz, GFP_KERNEL);
681 if (!buf) {
682		IWL_ERR(priv, "Cannot allocate buffer\n");
683 return -ENOMEM;
684 }
685
686	/* the statistics information displayed here is based on
687	 * the last statistics notification from uCode and
688	 * might not reflect the current uCode activity
689	 */
690 general = &priv->_4965.statistics.general.common;
691 dbg = &priv->_4965.statistics.general.common.dbg;
692 div = &priv->_4965.statistics.general.common.div;
693 accum_general = &priv->_4965.accum_statistics.general.common;
694 accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
695 accum_div = &priv->_4965.accum_statistics.general.common.div;
696 delta_general = &priv->_4965.delta_statistics.general.common;
697 max_general = &priv->_4965.max_delta.general.common;
698 delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
699 max_dbg = &priv->_4965.max_delta.general.common.dbg;
700 delta_div = &priv->_4965.delta_statistics.general.common.div;
701 max_div = &priv->_4965.max_delta.general.common.div;
702
703 pos += iwl4965_statistics_flag(priv, buf, bufsz);
704 pos += scnprintf(buf + pos, bufsz - pos,
705 fmt_header, "Statistics_General:");
706 pos += scnprintf(buf + pos, bufsz - pos,
707 fmt_value, "temperature:",
708 le32_to_cpu(general->temperature));
709 pos += scnprintf(buf + pos, bufsz - pos,
710 fmt_value, "ttl_timestamp:",
711 le32_to_cpu(general->ttl_timestamp));
712 pos += scnprintf(buf + pos, bufsz - pos,
713 fmt_table, "burst_check:",
714 le32_to_cpu(dbg->burst_check),
715 accum_dbg->burst_check,
716 delta_dbg->burst_check, max_dbg->burst_check);
717 pos += scnprintf(buf + pos, bufsz - pos,
718 fmt_table, "burst_count:",
719 le32_to_cpu(dbg->burst_count),
720 accum_dbg->burst_count,
721 delta_dbg->burst_count, max_dbg->burst_count);
722 pos += scnprintf(buf + pos, bufsz - pos,
723 fmt_table, "wait_for_silence_timeout_count:",
724 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
725 accum_dbg->wait_for_silence_timeout_cnt,
726 delta_dbg->wait_for_silence_timeout_cnt,
727 max_dbg->wait_for_silence_timeout_cnt);
728 pos += scnprintf(buf + pos, bufsz - pos,
729 fmt_table, "sleep_time:",
730 le32_to_cpu(general->sleep_time),
731 accum_general->sleep_time,
732 delta_general->sleep_time, max_general->sleep_time);
733 pos += scnprintf(buf + pos, bufsz - pos,
734 fmt_table, "slots_out:",
735 le32_to_cpu(general->slots_out),
736 accum_general->slots_out,
737 delta_general->slots_out, max_general->slots_out);
738 pos += scnprintf(buf + pos, bufsz - pos,
739 fmt_table, "slots_idle:",
740 le32_to_cpu(general->slots_idle),
741 accum_general->slots_idle,
742 delta_general->slots_idle, max_general->slots_idle);
743 pos += scnprintf(buf + pos, bufsz - pos,
744 fmt_table, "tx_on_a:",
745 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
746 delta_div->tx_on_a, max_div->tx_on_a);
747 pos += scnprintf(buf + pos, bufsz - pos,
748 fmt_table, "tx_on_b:",
749 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
750 delta_div->tx_on_b, max_div->tx_on_b);
751 pos += scnprintf(buf + pos, bufsz - pos,
752 fmt_table, "exec_time:",
753 le32_to_cpu(div->exec_time), accum_div->exec_time,
754 delta_div->exec_time, max_div->exec_time);
755 pos += scnprintf(buf + pos, bufsz - pos,
756 fmt_table, "probe_time:",
757 le32_to_cpu(div->probe_time), accum_div->probe_time,
758 delta_div->probe_time, max_div->probe_time);
759 pos += scnprintf(buf + pos, bufsz - pos,
760 fmt_table, "rx_enable_counter:",
761 le32_to_cpu(general->rx_enable_counter),
762 accum_general->rx_enable_counter,
763 delta_general->rx_enable_counter,
764 max_general->rx_enable_counter);
765 pos += scnprintf(buf + pos, bufsz - pos,
766 fmt_table, "num_of_sos_states:",
767 le32_to_cpu(general->num_of_sos_states),
768 accum_general->num_of_sos_states,
769 delta_general->num_of_sos_states,
770 max_general->num_of_sos_states);
771 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
772 kfree(buf);
773 return ret;
774}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
new file mode 100644
index 00000000000..6c8e35361a9
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
@@ -0,0 +1,59 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl4965_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count, loff_t *ppos);
40#else
41static ssize_t
42iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
43 size_t count, loff_t *ppos)
44{
45 return 0;
46}
47static ssize_t
48iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
49 size_t count, loff_t *ppos)
50{
51 return 0;
52}
53static ssize_t
54iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 return 0;
58}
59#endif
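
These hooks back read-only debugfs files; a minimal wiring sketch follows (not part of this patch). The open helper and file name are hypothetical: the real driver registers these entries through its own debugfs helpers.

static int iwl4965_example_stats_open(struct inode *inode, struct file *file)
{
	/* debugfs_create_file() stored the iwl_priv pointer in i_private */
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations iwl4965_example_rx_stats_ops = {
	.owner	= THIS_MODULE,
	.open	= iwl4965_example_stats_open,
	.read	= iwl4965_ucode_rx_stats_read,
	.llseek	= default_llseek,
};

/* usage sketch:
 * debugfs_create_file("rx_statistics", S_IRUSR, parent_dir, priv,
 *		       &iwl4965_example_rx_stats_ops);
 */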
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
new file mode 100644
index 00000000000..cb9baab1ff7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
@@ -0,0 +1,154 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-4965.h"
76#include "iwl-io.h"
77
78/******************************************************************************
79 *
80 * EEPROM related functions
81 *
82******************************************************************************/
83
84/*
85 * The device's EEPROM semaphore prevents conflicts between driver and uCode
86 * when accessing the EEPROM; each access is a series of pulses to/from the
87 * EEPROM chip, not a single event, so even reads could conflict if they
88 * weren't arbitrated by the semaphore.
89 */
90int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
91{
92 u16 count;
93 int ret;
94
95 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
96 /* Request semaphore */
97 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
98 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
99
100 /* See if we got it */
101 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
102 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
103 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
104 EEPROM_SEM_TIMEOUT);
105 if (ret >= 0) {
106 IWL_DEBUG_IO(priv,
107 "Acquired semaphore after %d tries.\n",
108 count+1);
109 return ret;
110 }
111 }
112
113 return ret;
114}
115
116void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
117{
118 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
119 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
120
121}
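
A brief usage sketch (not part of this patch): the semaphore must bracket the entire series of EEPROM accesses. The surrounding function is hypothetical.

static int iwl4965_example_eeprom_access(struct iwl_priv *priv)
{
	int ret = iwl4965_eeprom_acquire_semaphore(priv);

	/* negative return means uCode kept the semaphore */
	if (ret < 0)
		return ret;

	/* ... perform the series of EEPROM register reads here ... */

	iwl4965_eeprom_release_semaphore(priv);
	return 0;
}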
122
123int iwl4965_eeprom_check_version(struct iwl_priv *priv)
124{
125 u16 eeprom_ver;
126 u16 calib_ver;
127
128 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
129 calib_ver = iwl_legacy_eeprom_query16(priv,
130 EEPROM_4965_CALIB_VERSION_OFFSET);
131
132 if (eeprom_ver < priv->cfg->eeprom_ver ||
133 calib_ver < priv->cfg->eeprom_calib_ver)
134 goto err;
135
136 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
137 eeprom_ver, calib_ver);
138
139 return 0;
140err:
141 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
142 "CALIB=0x%x < 0x%x\n",
143 eeprom_ver, priv->cfg->eeprom_ver,
144 calib_ver, priv->cfg->eeprom_calib_ver);
145 return -EINVAL;
146
147}
148
149void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
150{
151 const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
152 EEPROM_MAC_ADDRESS);
153 memcpy(mac, addr, ETH_ALEN);
154}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
new file mode 100644
index 00000000000..fc6fa2886d9
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -0,0 +1,811 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
65 * Use iwl-commands.h for uCode API definitions.
66 * Use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_4965_hw_h__
70#define __iwl_4965_hw_h__
71
72#include "iwl-fh.h"
73
74/* EEPROM */
75#define IWL4965_EEPROM_IMG_SIZE 1024
76
77/*
78 * uCode queue management definitions ...
79 * The first queue used for block-ack aggregation is #7 (4965 only).
80 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
81 */
82#define IWL49_FIRST_AMPDU_QUEUE 7
83
84/* Sizes and addresses for instruction and data memory (SRAM) in
85 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
86#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
87#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
88
89#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
90#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
91
92#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
93 IWL49_RTC_INST_LOWER_BOUND)
94#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
95 IWL49_RTC_DATA_LOWER_BOUND)
96
97#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
98#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
99
100/* Size of uCode instruction memory in bootstrap state machine */
101#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
102
103static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
104{
105 return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
106 (addr < IWL49_RTC_DATA_UPPER_BOUND);
107}
108
109/********************* START TEMPERATURE *************************************/
110
111/**
112 * 4965 temperature calculation.
113 *
114 * The driver must calculate the device temperature before calculating
115 * a txpower setting (amplifier gain is temperature dependent). The
116 * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
117 * values used for the life of the driver, and one of which (R4) is the
118 * real-time temperature indicator.
119 *
120 * uCode provides all 4 values to the driver via the "initialize alive"
121 * notification (see struct iwl4965_init_alive_resp). After the runtime uCode
122 * image loads, uCode updates the R4 value via statistics notifications
123 * (see STATISTICS_NOTIFICATION), which occur after each received beacon
124 * when associated, or can be requested via REPLY_STATISTICS_CMD.
125 *
126 * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
127 * must sign-extend to 32 bits before applying formula below.
128 *
129 * Formula:
130 *
131 * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
132 *
133 * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
134 * an additional correction, which should be centered around 0 degrees
135 * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
136 * centering the 97/100 correction around 0 degrees K.
137 *
138 * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
139 * temperature with factory-measured temperatures when calculating txpower
140 * settings.
141 */
142#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
143#define TEMPERATURE_CALIB_A_VAL 259
144
145/* Limit range of calculated temperature to be between these Kelvin values */
146#define IWL_TX_POWER_TEMPERATURE_MIN (263)
147#define IWL_TX_POWER_TEMPERATURE_MAX (410)
148
149#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
150 (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
151 ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
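
A minimal illustrative sketch (not part of this patch) of the formula above, assuming R1-R4 have already been sign-extended to 32 bits; the helper name is hypothetical and the real calculation lives in the driver's .c code.

static inline s32 iwl4965_example_temperature(s32 r1, s32 r2, s32 r3, s32 r4)
{
	s32 kelvin;

	/* bogus calibration data; avoid divide-by-zero */
	if (r3 == r1)
		return 0;

	/* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
	kelvin = (97 * TEMPERATURE_CALIB_A_VAL * (r4 - r2) / (r3 - r1)) / 100;
	kelvin += TEMPERATURE_CALIB_KELVIN_OFFSET;

	return IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(kelvin) ? 0 : kelvin;
}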
152
153/********************* END TEMPERATURE ***************************************/
154
155/********************* START TXPOWER *****************************************/
156
157/**
158 * 4965 txpower calculations rely on information from three sources:
159 *
160 * 1) EEPROM
161 * 2) "initialize" alive notification
162 * 3) statistics notifications
163 *
164 * EEPROM data consists of:
165 *
166 * 1) Regulatory information (max txpower and channel usage flags) is provided
167 *	separately for each channel that can possibly be supported by 4965.
168 * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
169 * (legacy) channels.
170 *
171 * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
172 * for locations in EEPROM.
173 *
174 * 2) Factory txpower calibration information is provided separately for
175 *	sub-bands of contiguous channels.  2.4 GHz has just one sub-band,
176 * but 5 GHz has several sub-bands.
177 *
178 * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
179 *
180 * See struct iwl4965_eeprom_calib_info (and the tree of structures
181 * contained within it) for format, and struct iwl4965_eeprom for
182 * locations in EEPROM.
183 *
184 * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
185 * consists of:
186 *
187 * 1) Temperature calculation parameters.
188 *
189 * 2) Power supply voltage measurement.
190 *
191 * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
192 *
193 * Statistics notifications deliver:
194 *
195 * 1) Current values for temperature param R4.
196 */
197
198/**
199 * To calculate a txpower setting for a given desired target txpower, channel,
200 * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
201 * support MIMO and transmit diversity), driver must do the following:
202 *
203 * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
204 * Do not exceed regulatory limit; reduce target txpower if necessary.
205 *
206 * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
207 * 2 transmitters will be used simultaneously; driver must reduce the
208 * regulatory limit by 3 dB (half-power) for each transmitter, so the
209 * combined total output of the 2 transmitters is within regulatory limits.
210 *
211 *
212 * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
213 * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
214 * reduce target txpower if necessary.
215 *
216 * Backoff values below are in 1/2 dB units (equivalent to steps in
217 * txpower gain tables):
218 *
219 * OFDM 6 - 36 MBit: 10 steps (5 dB)
220 * OFDM 48 MBit: 15 steps (7.5 dB)
221 * OFDM 54 MBit: 17 steps (8.5 dB)
222 * OFDM 60 MBit: 20 steps (10 dB)
223 * CCK all rates: 10 steps (5 dB)
224 *
225 * Backoff values apply to saturation txpower on a per-transmitter basis;
226 * when using MIMO (2 transmitters), each transmitter uses the same
227 * saturation level provided in EEPROM, and the same backoff values;
228 * no reduction (such as with regulatory txpower limits) is required.
229 *
230 *	Saturation and Backoff values apply equally to 20 MHz (legacy) channel
231 *	widths and 40 MHz (.11n HT40) channel widths; there is no separate
232 *	factory measurement for HT40 channels.
233 *
234 * The result of this step is the final target txpower. The rest of
235 * the steps figure out the proper settings for the device to achieve
236 * that target txpower.
237 *
238 *
239 * 3) Determine (EEPROM) calibration sub band for the target channel, by
240 * comparing against first and last channels in each sub band
241 * (see struct iwl4965_eeprom_calib_subband_info).
242 *
243 *
244 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
245 * referencing the 2 factory-measured (sample) channels within the sub band.
246 *
247 * Interpolation is based on difference between target channel's frequency
248 * and the sample channels' frequencies. Since channel numbers are based
249 * on frequency (5 MHz between each channel number), this is equivalent
250 * to interpolating based on channel number differences.
251 *
252 * Note that the sample channels may or may not be the channels at the
253 * edges of the sub band. The target channel may be "outside" of the
254 * span of the sampled channels.
255 *
256 * Driver may choose the pair (for 2 Tx chains) of measurements (see
257 * struct iwl4965_eeprom_calib_ch_info) for which the actual measured
258 * txpower comes closest to the desired txpower. Usually, though,
259 * the middle set of measurements is closest to the regulatory limits,
260 * and is therefore a good choice for all txpower calculations (this
261 * assumes that high accuracy is needed for maximizing legal txpower,
262 * while lower txpower configurations do not need as much accuracy).
263 *
264 * Driver should interpolate both members of the chosen measurement pair,
265 * i.e. for both Tx chains (radio transmitters), unless the driver knows
266 * that only one of the chains will be used (e.g. only one tx antenna
267 * connected, but this should be unusual). The rate scaling algorithm
268 * switches antennas to find best performance, so both Tx chains will
269 * be used (although only one at a time) even for non-MIMO transmissions.
270 *
271 * Driver should interpolate factory values for temperature, gain table
272 * index, and actual power. The power amplifier detector values are
273 * not used by the driver.
274 *
275 * Sanity check: If the target channel happens to be one of the sample
276 * channels, the results should agree with the sample channel's
277 * measurements!
278 *
279 *
280 * 5) Find difference between desired txpower and (interpolated)
281 * factory-measured txpower. Using (interpolated) factory gain table index
282 * (shown elsewhere) as a starting point, adjust this index lower to
283 * increase txpower, or higher to decrease txpower, until the target
284 * txpower is reached. Each step in the gain table is 1/2 dB.
285 *
286 * For example, if factory measured txpower is 16 dBm, and target txpower
287 * is 13 dBm, add 6 steps to the factory gain index to reduce txpower
288 * by 3 dB.
289 *
290 *
291 * 6) Find difference between current device temperature and (interpolated)
292 * factory-measured temperature for sub-band. Factory values are in
293 * degrees Celsius. To calculate current temperature, see comments for
294 * "4965 temperature calculation".
295 *
296 * If current temperature is higher than factory temperature, driver must
297 *	increase gain (lower gain table index), and vice versa.
298 *
299 * Temperature affects gain differently for different channels:
300 *
301 * 2.4 GHz all channels: 3.5 degrees per half-dB step
302 * 5 GHz channels 34-43: 4.5 degrees per half-dB step
303 * 5 GHz channels >= 44: 4.0 degrees per half-dB step
304 *
305 * NOTE: Temperature can increase rapidly when transmitting, especially
306 * with heavy traffic at high txpowers. Driver should update
307 * temperature calculations often under these conditions to
308 * maintain strong txpower in the face of rising temperature.
309 *
310 *
311 * 7) Find difference between current power supply voltage indicator
312 * (from "initialize alive") and factory-measured power supply voltage
313 * indicator (EEPROM).
314 *
315 * If the current voltage is higher (indicator is lower) than factory
316 * voltage, gain should be reduced (gain table index increased) by:
317 *
318 * (eeprom - current) / 7
319 *
320 * If the current voltage is lower (indicator is higher) than factory
321 * voltage, gain should be increased (gain table index decreased) by:
322 *
323 * 2 * (current - eeprom) / 7
324 *
325 * If number of index steps in either direction turns out to be > 2,
326 * something is wrong ... just use 0.
327 *
328 * NOTE: Voltage compensation is independent of band/channel.
329 *
330 * NOTE: "Initialize" uCode measures current voltage, which is assumed
331 * to be constant after this initial measurement. Voltage
332 * compensation for txpower (number of steps in gain table)
333 * may be calculated once and used until the next uCode bootload.
334 *
335 *
336 * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
337 * adjust txpower for each transmitter chain, so txpower is balanced
338 * between the two chains. There are 5 pairs of tx_atten[group][chain]
339 * values in "initialize alive", one pair for each of 5 channel ranges:
340 *
341 * Group 0: 5 GHz channel 34-43
342 * Group 1: 5 GHz channel 44-70
343 * Group 2: 5 GHz channel 71-124
344 * Group 3: 5 GHz channel 125-200
345 * Group 4: 2.4 GHz all channels
346 *
347 * Add the tx_atten[group][chain] value to the index for the target chain.
348 * The values are signed, but are in pairs of 0 and a non-negative number,
349 * so as to reduce gain (if necessary) of the "hotter" channel. This
350 * avoids any need to double-check for regulatory compliance after
351 * this step.
352 *
353 *
354 * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
355 * value to the index:
356 *
357 * Hardware rev B: 9 steps (4.5 dB)
358 * Hardware rev C: 5 steps (2.5 dB)
359 *
360 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
361 * bits [3:2], 1 = B, 2 = C.
362 *
363 * NOTE: This compensation is in addition to any saturation backoff that
364 * might have been applied in an earlier step.
365 *
366 *
367 * 10) Select the gain table, based on band (2.4 vs 5 GHz).
368 *
369 * Limit the adjusted index to stay within the table!
370 *
371 *
372 * 11) Read gain table entries for DSP and radio gain, place into appropriate
373 * location(s) in command (struct iwl4965_txpowertable_cmd).
374 */
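
A rough sketch (not part of this patch) of the index arithmetic in steps 5 and 6 above, with txpowers in half-dBm and the temperature slope given in tenths of a degree per half-dB step; all names are hypothetical.

static inline int iwl4965_example_gain_idx(int factory_idx,
					   int factory_pwr,   /* half-dBm */
					   int target_pwr,    /* half-dBm */
					   int factory_temp,  /* Celsius */
					   int current_temp,  /* Celsius */
					   int deg_per_step_x10)
{
	int idx = factory_idx;

	/* Step 5: each table step is 1/2 dB and a higher index means less
	 * txpower, so factory 16 dBm (32) -> target 13 dBm (26) adds 6 steps */
	idx += factory_pwr - target_pwr;

	/* Step 6: hotter than factory needs more gain, i.e. a lower index */
	idx -= (current_temp - factory_temp) * 10 / deg_per_step_x10;

	return idx;
}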
375
376/**
377 * When MIMO is used (2 transmitters operating simultaneously), driver should
378 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
379 * for the device. That is, use half power for each transmitter, so total
380 * txpower is within regulatory limits.
381 *
382 * The value "6" represents number of steps in gain table to reduce power 3 dB.
383 * Each step is 1/2 dB.
384 */
385#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
386
387/**
388 * CCK gain compensation.
389 *
390 * When calculating txpowers for CCK, after making sure that the target power
391 * is within regulatory and saturation limits, driver must additionally
392 * back off gain by adding these values to the gain table index.
393 *
394 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
395 * bits [3:2], 1 = B, 2 = C.
396 */
397#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
398#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
399
400/*
401 * 4965 power supply voltage compensation for txpower
402 */
403#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
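
A sketch (not part of this patch) of the voltage rule from step 7 of the txpower procedure, returning the number of gain-table steps to add to the index; the helper name is hypothetical.

static inline int iwl4965_example_volt_compensation(s32 eeprom_voltage,
						    s32 current_voltage)
{
	int comp;

	if (current_voltage <= eeprom_voltage)
		/* indicator lower: actual supply voltage is higher,
		 * so reduce gain (raise the index) */
		comp = (eeprom_voltage - current_voltage) /
			TX_POWER_IWL_VOLTAGE_CODES_PER_03V;
	else
		/* indicator higher: actual supply voltage is lower,
		 * so increase gain (lower the index) */
		comp = -2 * (current_voltage - eeprom_voltage) /
			TX_POWER_IWL_VOLTAGE_CODES_PER_03V;

	/* more than 2 steps in either direction means something is wrong */
	if (abs(comp) > 2)
		comp = 0;

	return comp;
}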
404
405/**
406 * Gain tables.
407 *
408 * The following tables contain pairs of values for setting txpower, i.e.
409 * gain settings for the output of the device's digital signal processor (DSP),
410 * and for the analog gain structure of the transmitter.
411 *
412 * Each entry in the gain tables represents a step of 1/2 dB. Note that these
413 * are *relative* steps, not indications of absolute output power. Output
414 * power varies with temperature, voltage, and channel frequency, and also
415 * requires consideration of average power (to satisfy regulatory constraints),
416 * and peak power (to avoid distortion of the output signal).
417 *
418 * Each entry contains two values:
419 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
420 * linear value that multiplies the output of the digital signal processor,
421 * before being sent to the analog radio.
422 * 2) Radio gain. This sets the analog gain of the radio Tx path.
423 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
424 *
425 * EEPROM contains factory calibration data for txpower. This maps actual
426 * measured txpower levels to gain settings in the "well known" tables
427 * below ("well-known" means here that both factory calibration *and* the
428 * driver work with the same table).
429 *
430 * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
431 * has an extension (into negative indexes), in case the driver needs to
432 * boost power setting for high device temperatures (higher than would be
433 * present during factory calibration).  A 5 GHz EEPROM index of "40"
434 * corresponds to the 49th entry in the table used by the driver.
435 */
436#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
437#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
438
439/**
440 * 2.4 GHz gain table
441 *
442 * Index Dsp gain Radio gain
443 * 0 110 0x3f (highest gain)
444 * 1 104 0x3f
445 * 2 98 0x3f
446 * 3 110 0x3e
447 * 4 104 0x3e
448 * 5 98 0x3e
449 * 6 110 0x3d
450 * 7 104 0x3d
451 * 8 98 0x3d
452 * 9 110 0x3c
453 * 10 104 0x3c
454 * 11 98 0x3c
455 * 12 110 0x3b
456 * 13 104 0x3b
457 * 14 98 0x3b
458 * 15 110 0x3a
459 * 16 104 0x3a
460 * 17 98 0x3a
461 * 18 110 0x39
462 * 19 104 0x39
463 * 20 98 0x39
464 * 21 110 0x38
465 * 22 104 0x38
466 * 23 98 0x38
467 * 24 110 0x37
468 * 25 104 0x37
469 * 26 98 0x37
470 * 27 110 0x36
471 * 28 104 0x36
472 * 29 98 0x36
473 * 30 110 0x35
474 * 31 104 0x35
475 * 32 98 0x35
476 * 33 110 0x34
477 * 34 104 0x34
478 * 35 98 0x34
479 * 36 110 0x33
480 * 37 104 0x33
481 * 38 98 0x33
482 * 39 110 0x32
483 * 40 104 0x32
484 * 41 98 0x32
485 * 42 110 0x31
486 * 43 104 0x31
487 * 44 98 0x31
488 * 45 110 0x30
489 * 46 104 0x30
490 * 47 98 0x30
491 * 48 110 0x6
492 * 49 104 0x6
493 * 50 98 0x6
494 * 51 110 0x5
495 * 52 104 0x5
496 * 53 98 0x5
497 * 54 110 0x4
498 * 55 104 0x4
499 * 56 98 0x4
500 * 57 110 0x3
501 * 58 104 0x3
502 * 59 98 0x3
503 * 60 110 0x2
504 * 61 104 0x2
505 * 62 98 0x2
506 * 63 110 0x1
507 * 64 104 0x1
508 * 65 98 0x1
509 * 66 110 0x0
510 * 67 104 0x0
511 * 68 98 0x0
512 * 69 97 0
513 * 70 96 0
514 * 71 95 0
515 * 72 94 0
516 * 73 93 0
517 * 74 92 0
518 * 75 91 0
519 * 76 90 0
520 * 77 89 0
521 * 78 88 0
522 * 79 87 0
523 * 80 86 0
524 * 81 85 0
525 * 82 84 0
526 * 83 83 0
527 * 84 82 0
528 * 85 81 0
529 * 86 80 0
530 * 87 79 0
531 * 88 78 0
532 * 89 77 0
533 * 90 76 0
534 * 91 75 0
535 * 92 74 0
536 * 93 73 0
537 * 94 72 0
538 * 95 71 0
539 * 96 70 0
540 * 97 69 0
541 * 98 68 0
542 */
543
544/**
545 * 5 GHz gain table
546 *
547 * Index Dsp gain Radio gain
548 * -9 123 0x3F (highest gain)
549 * -8 117 0x3F
550 * -7 110 0x3F
551 * -6 104 0x3F
552 * -5 98 0x3F
553 * -4 110 0x3E
554 * -3 104 0x3E
555 * -2 98 0x3E
556 * -1 110 0x3D
557 * 0 104 0x3D
558 * 1 98 0x3D
559 * 2 110 0x3C
560 * 3 104 0x3C
561 * 4 98 0x3C
562 * 5 110 0x3B
563 * 6 104 0x3B
564 * 7 98 0x3B
565 * 8 110 0x3A
566 * 9 104 0x3A
567 * 10 98 0x3A
568 * 11 110 0x39
569 * 12 104 0x39
570 * 13 98 0x39
571 * 14 110 0x38
572 * 15 104 0x38
573 * 16 98 0x38
574 * 17 110 0x37
575 * 18 104 0x37
576 * 19 98 0x37
577 * 20 110 0x36
578 * 21 104 0x36
579 * 22 98 0x36
580 * 23 110 0x35
581 * 24 104 0x35
582 * 25 98 0x35
583 * 26 110 0x34
584 * 27 104 0x34
585 * 28 98 0x34
586 * 29 110 0x33
587 * 30 104 0x33
588 * 31 98 0x33
589 * 32 110 0x32
590 * 33 104 0x32
591 * 34 98 0x32
592 * 35 110 0x31
593 * 36 104 0x31
594 * 37 98 0x31
595 * 38 110 0x30
596 * 39 104 0x30
597 * 40 98 0x30
598 * 41 110 0x25
599 * 42 104 0x25
600 * 43 98 0x25
601 * 44 110 0x24
602 * 45 104 0x24
603 * 46 98 0x24
604 * 47 110 0x23
605 * 48 104 0x23
606 * 49 98 0x23
607 * 50 110 0x22
608 * 51 104 0x18
609 * 52 98 0x18
610 * 53 110 0x17
611 * 54 104 0x17
612 * 55 98 0x17
613 * 56 110 0x16
614 * 57 104 0x16
615 * 58 98 0x16
616 * 59 110 0x15
617 * 60 104 0x15
618 * 61 98 0x15
619 * 62 110 0x14
620 * 63 104 0x14
621 * 64 98 0x14
622 * 65 110 0x13
623 * 66 104 0x13
624 * 67 98 0x13
625 * 68 110 0x12
626 * 69 104 0x08
627 * 70 98 0x08
628 * 71 110 0x07
629 * 72 104 0x07
630 * 73 98 0x07
631 * 74 110 0x06
632 * 75 104 0x06
633 * 76 98 0x06
634 * 77 110 0x05
635 * 78 104 0x05
636 * 79 98 0x05
637 * 80 110 0x04
638 * 81 104 0x04
639 * 82 98 0x04
640 * 83 110 0x03
641 * 84 104 0x03
642 * 85 98 0x03
643 * 86 110 0x02
644 * 87 104 0x02
645 * 88 98 0x02
646 * 89 110 0x01
647 * 90 104 0x01
648 * 91 98 0x01
649 * 92 110 0x00
650 * 93 104 0x00
651 * 94 98 0x00
652 * 95 93 0x00
653 * 96 88 0x00
654 * 97 83 0x00
655 * 98 78 0x00
656 */
657
658
659/**
660 * Sanity checks and default values for EEPROM regulatory levels.
661 * If EEPROM values fall outside MIN/MAX range, use default values.
662 *
663 * Regulatory limits refer to the maximum average txpower allowed by
664 * regulatory agencies in the geographies in which the device is meant
665 * to be operated. These limits are SKU-specific (i.e. geography-specific),
666 * and channel-specific; each channel has an individual regulatory limit
667 * listed in the EEPROM.
668 *
669 * Units are in half-dBm (i.e. "34" means 17 dBm).
670 */
671#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
672#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
673#define IWL_TX_POWER_REGULATORY_MIN (0)
674#define IWL_TX_POWER_REGULATORY_MAX (34)
675
676/**
677 * Sanity checks and default values for EEPROM saturation levels.
678 * If EEPROM values fall outside MIN/MAX range, use default values.
679 *
680 * Saturation is the highest level that the output power amplifier can produce
681 * without significant clipping distortion. This is a "peak" power level.
682 * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
683 * require differing amounts of backoff, relative to their average power output,
684 * in order to avoid clipping distortion.
685 *
686 * Driver must make sure that it is violating neither the saturation limit,
687 * nor the regulatory limit, when calculating Tx power settings for various
688 * rates.
689 *
690 * Units are in half-dBm (i.e. "38" means 19 dBm).
691 */
692#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
693#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
694#define IWL_TX_POWER_SATURATION_MIN (20)
695#define IWL_TX_POWER_SATURATION_MAX (50)
696
697/**
698 * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
699 * and thermal Txpower calibration.
700 *
701 * When calculating txpower, driver must compensate for current device
702 * temperature; higher temperature requires higher gain. Driver must calculate
703 * current temperature (see "4965 temperature calculation"), then compare vs.
704 * factory calibration temperature in EEPROM; if current temperature is higher
705 * than factory temperature, driver must *increase* gain by proportions shown
706 * in table below. If current temperature is lower than factory, driver must
707 * *decrease* gain.
708 *
709 * Different frequency ranges require different compensation, as shown below.
710 */
711/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
712#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
713#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
714
715/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
716#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
717#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
718
719/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
720#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
721#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
722
723/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
724#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
725#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
726
727/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
728#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
729#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
730
731enum {
732 CALIB_CH_GROUP_1 = 0,
733 CALIB_CH_GROUP_2 = 1,
734 CALIB_CH_GROUP_3 = 2,
735 CALIB_CH_GROUP_4 = 3,
736 CALIB_CH_GROUP_5 = 4,
737 CALIB_CH_GROUP_MAX
738};
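
A sketch (not part of this patch) mapping a channel number to its calibration group using the bounds above; the function name is hypothetical (the driver performs this lookup in its txpower code).

static inline int iwl4965_example_tx_atten_grp(u16 channel)
{
	/* 2.4 GHz channel numbers (1-20 here) do not overlap the 5 GHz ranges */
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;
	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;
	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;
	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;
	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;

	return -1;	/* no matching calibration group */
}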
739
740/********************* END TXPOWER *****************************************/
741
742
743/**
744 * Tx/Rx Queues
745 *
746 * Most communication between driver and 4965 is via queues of data buffers.
747 * For example, all commands that the driver issues to device's embedded
748 * controller (uCode) are via the command queue (one of the Tx queues). All
749 * uCode command responses/replies/notifications, including Rx frames, are
750 * conveyed from uCode to driver via the Rx queue.
751 *
752 * Most support for these queues, including handshake support, resides in
753 * structures in host DRAM, shared between the driver and the device. When
754 * allocating this memory, the driver must make sure that data written by
755 * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
756 * cache memory), so DRAM and cache are consistent, and the device can
757 * immediately see changes made by the driver.
758 *
759 * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
760 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
761 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
762 */
763#define IWL49_NUM_FIFOS 7
764#define IWL49_CMD_FIFO_NUM 4
765#define IWL49_NUM_QUEUES 16
766#define IWL49_NUM_AMPDU_QUEUES 8
767
768
769/**
770 * struct iwl4965_schedq_bc_tbl
771 *
772 * Byte Count table
773 *
774 * Each Tx queue uses a byte-count table containing 320 entries:
775 * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
776 * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
777 * max Tx window is 64 TFDs).
778 *
779 * When driver sets up a new TFD, it must also enter the total byte count
780 * of the frame to be transmitted into the corresponding entry in the byte
781 * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
782 * must duplicate the byte count entry in corresponding index 256-319.
783 *
784 * padding puts each byte count table on a 1024-byte boundary;
785 * 4965 assumes tables are separated by 1024 bytes.
786 */
787struct iwl4965_scd_bc_tbl {
788 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
790} __packed;
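
A sketch (not part of this patch) of the duplication rule described above; the helper name is hypothetical and the real update happens in the driver's Tx path (which also folds other fields into the 16-bit entry).

static inline void
iwl4965_example_set_byte_cnt(struct iwl4965_scd_bc_tbl *bc_tbl,
			     int tfd_idx, u16 byte_cnt)
{
	bc_tbl->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);

	/* entries 0-63 are mirrored at 256-319 so that a 64-TFD Tx window
	 * never wraps past the end of the table */
	if (tfd_idx < 64)
		bc_tbl->tfd_offset[tfd_idx + 256] = cpu_to_le16(byte_cnt);
}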
791
792
793#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
794
795/* RSSI to dBm */
796#define IWL4965_RSSI_OFFSET 44
797
798/* PCI registers */
799#define PCI_CFG_RETRY_TIMEOUT 0x041
800
801/* PCI register values */
802#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
803#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
804
805#define IWL4965_DEFAULT_TX_RETRY 15
806
807/* EEPROM */
808#define IWL4965_FIRST_AMPDU_QUEUE 10
809
810
811#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
new file mode 100644
index 00000000000..26d324e3069
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
@@ -0,0 +1,74 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-4965-led.h"
45
46/* Send led command */
47static int
48iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
49{
50 struct iwl_host_cmd cmd = {
51 .id = REPLY_LEDS_CMD,
52 .len = sizeof(struct iwl_led_cmd),
53 .data = led_cmd,
54 .flags = CMD_ASYNC,
55 .callback = NULL,
56 };
57 u32 reg;
58
59 reg = iwl_read32(priv, CSR_LED_REG);
60 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
61 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
62
63 return iwl_legacy_send_cmd(priv, &cmd);
64}
65
66/* Set led register off */
67void iwl4965_led_enable(struct iwl_priv *priv)
68{
69 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
70}
71
72const struct iwl_led_ops iwl4965_led_ops = {
73 .cmd = iwl4965_send_led_cmd,
74};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
new file mode 100644
index 00000000000..5ed3615fc33
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
@@ -0,0 +1,33 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_4965_led_h__
28#define __iwl_4965_led_h__
29
30extern const struct iwl_led_ops iwl4965_led_ops;
31void iwl4965_led_enable(struct iwl_priv *priv);
32
33#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
new file mode 100644
index 00000000000..2be6d9e3b01
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -0,0 +1,1194 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-sta.h"
42
43void iwl4965_check_abort_status(struct iwl_priv *priv,
44 u8 frame_count, u32 status)
45{
46 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
47 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
48 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
49 queue_work(priv->workqueue, &priv->tx_flush);
50 }
51}
52
53/*
54 * EEPROM
55 */
56struct iwl_mod_params iwl4965_mod_params = {
57 .amsdu_size_8K = 1,
58 .restart_fw = 1,
59 /* the rest are 0 by default */
60};
61
62void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
63{
64 unsigned long flags;
65 int i;
66 spin_lock_irqsave(&rxq->lock, flags);
67 INIT_LIST_HEAD(&rxq->rx_free);
68 INIT_LIST_HEAD(&rxq->rx_used);
69 /* Fill the rx_used queue with _all_ of the Rx buffers */
70 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
71 /* In the reset function, these buffers may have been allocated
72 * to an SKB, so we need to unmap and free potential storage */
73 if (rxq->pool[i].page != NULL) {
74 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
75 PAGE_SIZE << priv->hw_params.rx_page_order,
76 PCI_DMA_FROMDEVICE);
77 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
78 rxq->pool[i].page = NULL;
79 }
80 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
81 }
82
83 for (i = 0; i < RX_QUEUE_SIZE; i++)
84 rxq->queue[i] = NULL;
85
86 /* Set us so that we have processed and used all buffers, but have
87 * not restocked the Rx queue with fresh buffers */
88 rxq->read = rxq->write = 0;
89 rxq->write_actual = 0;
90 rxq->free_count = 0;
91 spin_unlock_irqrestore(&rxq->lock, flags);
92}
93
94int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
95{
96 u32 rb_size;
97 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
98 u32 rb_timeout = 0;
99
100 if (priv->cfg->mod_params->amsdu_size_8K)
101 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
102 else
103 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
104
105 /* Stop Rx DMA */
106 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
107
108 /* Reset driver's Rx queue write index */
109 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
110
111 /* Tell device where to find RBD circular buffer in DRAM */
112 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
113 (u32)(rxq->bd_dma >> 8));
114
115 /* Tell device where in DRAM to update its Rx status */
116 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
117 rxq->rb_stts_dma >> 4);
118
119 /* Enable Rx DMA
120 * Direct rx interrupts to hosts
121 * Rx buffer size 4 or 8k
122 * RB timeout 0x10
123 * 256 RBDs
124 */
125 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
126 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
127 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
128 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
129 rb_size|
130 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
131 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
132
133 /* Set interrupt coalescing timer to default (2048 usecs) */
134 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
135
136 return 0;
137}
138
139static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
140{
141/*
142 * (for documentation purposes)
143 * to set power to V_AUX, do:
144
145 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
146 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
147 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
148 ~APMG_PS_CTRL_MSK_PWR_SRC);
149 */
150
151 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
152 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
153 ~APMG_PS_CTRL_MSK_PWR_SRC);
154}
155
156int iwl4965_hw_nic_init(struct iwl_priv *priv)
157{
158 unsigned long flags;
159 struct iwl_rx_queue *rxq = &priv->rxq;
160 int ret;
161
162 /* nic_init */
163 spin_lock_irqsave(&priv->lock, flags);
164 priv->cfg->ops->lib->apm_ops.init(priv);
165
166 /* Set interrupt coalescing calibration timer to default (512 usecs) */
167 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
168
169 spin_unlock_irqrestore(&priv->lock, flags);
170
171 iwl4965_set_pwr_vmain(priv);
172
173 priv->cfg->ops->lib->apm_ops.config(priv);
174
175 /* Allocate the RX queue, or reset if it is already allocated */
176 if (!rxq->bd) {
177 ret = iwl_legacy_rx_queue_alloc(priv);
178 if (ret) {
179 IWL_ERR(priv, "Unable to initialize Rx queue\n");
180 return -ENOMEM;
181 }
182 } else
183 iwl4965_rx_queue_reset(priv, rxq);
184
185 iwl4965_rx_replenish(priv);
186
187 iwl4965_rx_init(priv, rxq);
188
189 spin_lock_irqsave(&priv->lock, flags);
190
191 rxq->need_update = 1;
192 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
193
194 spin_unlock_irqrestore(&priv->lock, flags);
195
196 /* Allocate or reset and init all Tx and Command queues */
197 if (!priv->txq) {
198 ret = iwl4965_txq_ctx_alloc(priv);
199 if (ret)
200 return ret;
201 } else
202 iwl4965_txq_ctx_reset(priv);
203
204 set_bit(STATUS_INIT, &priv->status);
205
206 return 0;
207}
208
209/**
210 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
211 */
212static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
213 dma_addr_t dma_addr)
214{
215 return cpu_to_le32((u32)(dma_addr >> 8));
216}
217
218/**
219 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
220 *
221 * If there are slots in the RX queue that need to be restocked,
222 * and we have free pre-allocated buffers, fill the ranks as much
223 * as we can, pulling from rx_free.
224 *
225 * This moves the 'write' index forward to catch up with 'processed', and
226 * also updates the memory address in the firmware to reference the new
227 * target buffer.
228 */
229void iwl4965_rx_queue_restock(struct iwl_priv *priv)
230{
231 struct iwl_rx_queue *rxq = &priv->rxq;
232 struct list_head *element;
233 struct iwl_rx_mem_buffer *rxb;
234 unsigned long flags;
235
236 spin_lock_irqsave(&rxq->lock, flags);
237 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
238 /* The overwritten rxb must be a used one */
239 rxb = rxq->queue[rxq->write];
240 BUG_ON(rxb && rxb->page);
241
242 /* Get next free Rx buffer, remove from free list */
243 element = rxq->rx_free.next;
244 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
245 list_del(element);
246
247 /* Point to Rx buffer via next RBD in circular buffer */
248 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
249 rxb->page_dma);
250 rxq->queue[rxq->write] = rxb;
251 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
252 rxq->free_count--;
253 }
254 spin_unlock_irqrestore(&rxq->lock, flags);
255 /* If the pre-allocated buffer pool is dropping low, schedule to
256 * refill it */
257 if (rxq->free_count <= RX_LOW_WATERMARK)
258 queue_work(priv->workqueue, &priv->rx_replenish);
259
260
261 /* If we've added more space for the firmware to place data, tell it.
262 * Increment device's write pointer in multiples of 8. */
263 if (rxq->write_actual != (rxq->write & ~0x7)) {
264 spin_lock_irqsave(&rxq->lock, flags);
265 rxq->need_update = 1;
266 spin_unlock_irqrestore(&rxq->lock, flags);
267 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
268 }
269}
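
To make the "multiples of 8" rule in the code above concrete, here is a small worked example (the values are invented for illustration):

/*
 * rxq->write = 13:  13 & ~0x7 = 8   -> device is told of 8 filled RBDs
 * rxq->write = 15:  15 & ~0x7 = 8   -> still 8, write pointer unchanged
 * rxq->write = 16:  16 & ~0x7 = 16  -> write pointer advances to 16
 */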
270
271/**
272 * iwl4965_rx_replenish - Move all used packets from rx_used to rx_free
273 *
274 * When moving to rx_free an SKB is allocated for the slot.
275 *
276 * Also restock the Rx queue via iwl_rx_queue_restock.
277 * This is called as a scheduled work item (except during initialization)
278 */
279static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
280{
281 struct iwl_rx_queue *rxq = &priv->rxq;
282 struct list_head *element;
283 struct iwl_rx_mem_buffer *rxb;
284 struct page *page;
285 unsigned long flags;
286 gfp_t gfp_mask = priority;
287
288 while (1) {
289 spin_lock_irqsave(&rxq->lock, flags);
290 if (list_empty(&rxq->rx_used)) {
291 spin_unlock_irqrestore(&rxq->lock, flags);
292 return;
293 }
294 spin_unlock_irqrestore(&rxq->lock, flags);
295
296 if (rxq->free_count > RX_LOW_WATERMARK)
297 gfp_mask |= __GFP_NOWARN;
298
299 if (priv->hw_params.rx_page_order > 0)
300 gfp_mask |= __GFP_COMP;
301
302 /* Alloc a new receive buffer */
303 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
304 if (!page) {
305 if (net_ratelimit())
306 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
307 "order: %d\n",
308 priv->hw_params.rx_page_order);
309
310 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
311 net_ratelimit())
312 IWL_CRIT(priv,
313 "Failed to alloc_pages with %s. "
314 "Only %u free buffers remaining.\n",
315 priority == GFP_ATOMIC ?
316 "GFP_ATOMIC" : "GFP_KERNEL",
317 rxq->free_count);
318 /* We don't reschedule replenish work here -- we will
319 * call the restock method and if it still needs
320 * more buffers it will schedule replenish */
321 return;
322 }
323
324 spin_lock_irqsave(&rxq->lock, flags);
325
326 if (list_empty(&rxq->rx_used)) {
327 spin_unlock_irqrestore(&rxq->lock, flags);
328 __free_pages(page, priv->hw_params.rx_page_order);
329 return;
330 }
331 element = rxq->rx_used.next;
332 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
333 list_del(element);
334
335 spin_unlock_irqrestore(&rxq->lock, flags);
336
337 BUG_ON(rxb->page);
338 rxb->page = page;
339 /* Get physical address of the RB */
340 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
341 PAGE_SIZE << priv->hw_params.rx_page_order,
342 PCI_DMA_FROMDEVICE);
343 /* dma address must be no more than 36 bits */
344 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
345 /* and also 256 byte aligned! */
346 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
347
348 spin_lock_irqsave(&rxq->lock, flags);
349
350 list_add_tail(&rxb->list, &rxq->rx_free);
351 rxq->free_count++;
352 priv->alloc_rxb_page++;
353
354 spin_unlock_irqrestore(&rxq->lock, flags);
355 }
356}
357
358void iwl4965_rx_replenish(struct iwl_priv *priv)
359{
360 unsigned long flags;
361
362 iwl4965_rx_allocate(priv, GFP_KERNEL);
363
364 spin_lock_irqsave(&priv->lock, flags);
365 iwl4965_rx_queue_restock(priv);
366 spin_unlock_irqrestore(&priv->lock, flags);
367}
368
369void iwl4965_rx_replenish_now(struct iwl_priv *priv)
370{
371 iwl4965_rx_allocate(priv, GFP_ATOMIC);
372
373 iwl4965_rx_queue_restock(priv);
374}
375
376/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
377 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
378 * This free routine walks the list of POOL entries; if SKB is set to
379 * non-NULL, it is unmapped and freed.
380 */
381void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
382{
383 int i;
384 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
385 if (rxq->pool[i].page != NULL) {
386 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
387 PAGE_SIZE << priv->hw_params.rx_page_order,
388 PCI_DMA_FROMDEVICE);
389 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
390 rxq->pool[i].page = NULL;
391 }
392 }
393
394 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
395 rxq->bd_dma);
396 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
397 rxq->rb_stts, rxq->rb_stts_dma);
398 rxq->bd = NULL;
399 rxq->rb_stts = NULL;
400}
401
402int iwl4965_rxq_stop(struct iwl_priv *priv)
403{
404
405 /* stop Rx DMA */
406 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
407 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
408 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
409
410 return 0;
411}
412
413int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
414{
415 int idx = 0;
416 int band_offset = 0;
417
418 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
419 if (rate_n_flags & RATE_MCS_HT_MSK) {
420 idx = (rate_n_flags & 0xff);
421 return idx;
422 /* Legacy rate format, search for match in table */
423 } else {
424 if (band == IEEE80211_BAND_5GHZ)
425 band_offset = IWL_FIRST_OFDM_RATE;
426 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
427 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
428 return idx - band_offset;
429 }
430
431 return -1;
432}
433
434static int iwl4965_calc_rssi(struct iwl_priv *priv,
435 struct iwl_rx_phy_res *rx_resp)
436{
437 /* data from PHY/DSP regarding signal strength, etc.,
438 * contents are always there, not configurable by host. */
439 struct iwl4965_rx_non_cfg_phy *ncphy =
440 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
441 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
442 >> IWL49_AGC_DB_POS;
443
444 u32 valid_antennae =
445 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
446 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
447 u8 max_rssi = 0;
448 u32 i;
449
450 /* Find max rssi among 3 possible receivers.
451 * These values are measured by the digital signal processor (DSP).
452 * They should stay fairly constant even as the signal strength varies,
453 * if the radio's automatic gain control (AGC) is working right.
454 * AGC value (see below) will provide the "interesting" info. */
455 for (i = 0; i < 3; i++)
456 if (valid_antennae & (1 << i))
457 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
458
459 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
460 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
461 max_rssi, agc);
462
463 /* dBm = max_rssi dB - agc dB - constant.
464 * Higher AGC (higher radio gain) means lower signal. */
465 return max_rssi - agc - IWL4965_RSSI_OFFSET;
466}
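
As a numeric illustration of the formula above (the DSP readings here are invented, not measured values):

/*
 * max_rssi = 50, agc = 40:
 *	signal = 50 - 40 - IWL4965_RSSI_OFFSET (44) = -34 dBm
 */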
467
468
469static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
470{
471 u32 decrypt_out = 0;
472
473 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
474 RX_RES_STATUS_STATION_FOUND)
475 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
476 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
477
478 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
479
480 /* packet was not encrypted */
481 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
482 RX_RES_STATUS_SEC_TYPE_NONE)
483 return decrypt_out;
484
485 /* packet was encrypted with unknown alg */
486 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
487 RX_RES_STATUS_SEC_TYPE_ERR)
488 return decrypt_out;
489
490 /* decryption was not done in HW */
491 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
492 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
493 return decrypt_out;
494
495 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
496
497 case RX_RES_STATUS_SEC_TYPE_CCMP:
498 /* alg is CCM: check MIC only */
499 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
500 /* Bad MIC */
501 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
502 else
503 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
504
505 break;
506
507 case RX_RES_STATUS_SEC_TYPE_TKIP:
508 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
509 /* Bad TTAK */
510 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
511 break;
512 }
513 /* fall through if TTAK OK */
514 default:
515 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
516 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
517 else
518 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
519 break;
520 }
521
522 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
523 decrypt_in, decrypt_out);
524
525 return decrypt_out;
526}
527
528static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
529 struct ieee80211_hdr *hdr,
530 u16 len,
531 u32 ampdu_status,
532 struct iwl_rx_mem_buffer *rxb,
533 struct ieee80211_rx_status *stats)
534{
535 struct sk_buff *skb;
536 __le16 fc = hdr->frame_control;
537
538 /* We only process data packets if the interface is open */
539 if (unlikely(!priv->is_open)) {
540 IWL_DEBUG_DROP_LIMIT(priv,
541 "Dropping packet while interface is not open.\n");
542 return;
543 }
544
545 /* In case of HW accelerated crypto and bad decryption, drop */
546 if (!priv->cfg->mod_params->sw_crypto &&
547 iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
548 return;
549
550 skb = dev_alloc_skb(128);
551 if (!skb) {
552 IWL_ERR(priv, "dev_alloc_skb failed\n");
553 return;
554 }
555
556 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
557
558 iwl_legacy_update_stats(priv, false, fc, len);
559 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
560
561 ieee80211_rx(priv->hw, skb);
562 priv->alloc_rxb_page--;
563 rxb->page = NULL;
564}
565
566/* Called for REPLY_RX (legacy ABG frames), or
567 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
568void iwl4965_rx_reply_rx(struct iwl_priv *priv,
569 struct iwl_rx_mem_buffer *rxb)
570{
571 struct ieee80211_hdr *header;
572 struct ieee80211_rx_status rx_status;
573 struct iwl_rx_packet *pkt = rxb_addr(rxb);
574 struct iwl_rx_phy_res *phy_res;
575 __le32 rx_pkt_status;
576 struct iwl_rx_mpdu_res_start *amsdu;
577 u32 len;
578 u32 ampdu_status;
579 u32 rate_n_flags;
580
581 /**
582 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
583 * REPLY_RX: physical layer info is in this buffer
584 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
585 * command and cached in priv->last_phy_res
586 *
587 * Here we set up local variables depending on which command is
588 * received.
589 */
590 if (pkt->hdr.cmd == REPLY_RX) {
591 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
592 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
593 + phy_res->cfg_phy_cnt);
594
595 len = le16_to_cpu(phy_res->byte_count);
596 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
597 phy_res->cfg_phy_cnt + len);
598 ampdu_status = le32_to_cpu(rx_pkt_status);
599 } else {
600 if (!priv->_4965.last_phy_res_valid) {
601 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
602 return;
603 }
604 phy_res = &priv->_4965.last_phy_res;
605 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
606 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
607 len = le16_to_cpu(amsdu->byte_count);
608 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
609 ampdu_status = iwl4965_translate_rx_status(priv,
610 le32_to_cpu(rx_pkt_status));
611 }
612
613 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
614		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
615 phy_res->cfg_phy_cnt);
616 return;
617 }
618
619 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
620 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
621 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
622 le32_to_cpu(rx_pkt_status));
623 return;
624 }
625
626 /* This will be used in several places later */
627 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
628
629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
631 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
632 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
633 rx_status.freq =
634 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
635 rx_status.band);
636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0;
639
640 /* TSF isn't reliable. In order to allow smooth user experience,
641	 * this W/A doesn't propagate it to mac80211 */
642 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
643
644 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
645
646 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
647 rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
648
649 iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
650 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
651 rx_status.signal, (unsigned long long)rx_status.mactime);
652
653 /*
654 * "antenna number"
655 *
656 * It seems that the antenna field in the phy flags value
657 * is actually a bit field. This is undefined by radiotap,
658 * it wants an actual antenna number but I always get "7"
659 * for most legacy frames I receive indicating that the
660 * same frame was received on all three RX chains.
661 *
662 * I think this field should be removed in favor of a
663 * new 802.11n radiotap field "RX chains" that is defined
664 * as a bitmask.
665 */
666 rx_status.antenna =
667 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
668 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
669
670 /* set the preamble flag if appropriate */
671 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
672 rx_status.flag |= RX_FLAG_SHORTPRE;
673
674 /* Set up the HT phy flags */
675 if (rate_n_flags & RATE_MCS_HT_MSK)
676 rx_status.flag |= RX_FLAG_HT;
677 if (rate_n_flags & RATE_MCS_HT40_MSK)
678 rx_status.flag |= RX_FLAG_40MHZ;
679 if (rate_n_flags & RATE_MCS_SGI_MSK)
680 rx_status.flag |= RX_FLAG_SHORT_GI;
681
682 iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
683 rxb, &rx_status);
684}
685
686/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
687 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
688void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
689 struct iwl_rx_mem_buffer *rxb)
690{
691 struct iwl_rx_packet *pkt = rxb_addr(rxb);
692 priv->_4965.last_phy_res_valid = true;
693 memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
694 sizeof(struct iwl_rx_phy_res));
695}
696
697static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
698 struct ieee80211_vif *vif,
699 enum ieee80211_band band,
700 u8 is_active, u8 n_probes,
701 struct iwl_scan_channel *scan_ch)
702{
703 struct ieee80211_channel *chan;
704 const struct ieee80211_supported_band *sband;
705 const struct iwl_channel_info *ch_info;
706 u16 passive_dwell = 0;
707 u16 active_dwell = 0;
708 int added, i;
709 u16 channel;
710
711 sband = iwl_get_hw_mode(priv, band);
712 if (!sband)
713 return 0;
714
715 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
716 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
717
718 if (passive_dwell <= active_dwell)
719 passive_dwell = active_dwell + 1;
720
721 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
722 chan = priv->scan_request->channels[i];
723
724 if (chan->band != band)
725 continue;
726
727 channel = chan->hw_value;
728 scan_ch->channel = cpu_to_le16(channel);
729
730 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
731 if (!iwl_legacy_is_channel_valid(ch_info)) {
732 IWL_DEBUG_SCAN(priv,
733 "Channel %d is INVALID for this band.\n",
734 channel);
735 continue;
736 }
737
738 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
739 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
740 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
741 else
742 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
743
744 if (n_probes)
745 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
746
747 scan_ch->active_dwell = cpu_to_le16(active_dwell);
748 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
749
750 /* Set txpower levels to defaults */
751 scan_ch->dsp_atten = 110;
752
753 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
754 * power level:
755 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
756 */
757 if (band == IEEE80211_BAND_5GHZ)
758 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
759 else
760 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
761
762 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
763 channel, le32_to_cpu(scan_ch->type),
764 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
765 "ACTIVE" : "PASSIVE",
766 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
767 active_dwell : passive_dwell);
768
769 scan_ch++;
770 added++;
771 }
772
773 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
774 return added;
775}
776
777int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
778{
779 struct iwl_host_cmd cmd = {
780 .id = REPLY_SCAN_CMD,
781 .len = sizeof(struct iwl_scan_cmd),
782 .flags = CMD_SIZE_HUGE,
783 };
784 struct iwl_scan_cmd *scan;
785 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
786 u32 rate_flags = 0;
787 u16 cmd_len;
788 u16 rx_chain = 0;
789 enum ieee80211_band band;
790 u8 n_probes = 0;
791 u8 rx_ant = priv->hw_params.valid_rx_ant;
792 u8 rate;
793 bool is_active = false;
794 int chan_mod;
795 u8 active_chains;
796 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
797 int ret;
798
799 lockdep_assert_held(&priv->mutex);
800
801 if (vif)
802 ctx = iwl_legacy_rxon_ctx_from_vif(vif);
803
804 if (!priv->scan_cmd) {
805 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
806 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
807 if (!priv->scan_cmd) {
808 IWL_DEBUG_SCAN(priv,
809 "fail to allocate memory for scan\n");
810 return -ENOMEM;
811 }
812 }
813 scan = priv->scan_cmd;
814 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
815
816 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
817 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
818
819 if (iwl_legacy_is_any_associated(priv)) {
820 u16 interval;
821 u32 extra;
822 u32 suspend_time = 100;
823 u32 scan_suspend_time = 100;
824
825 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
826 interval = vif->bss_conf.beacon_int;
827
828 scan->suspend_time = 0;
829 scan->max_out_time = cpu_to_le32(200 * 1024);
830 if (!interval)
831 interval = suspend_time;
832
833 extra = (suspend_time / interval) << 22;
834 scan_suspend_time = (extra |
835 ((suspend_time % interval) * 1024));
836 scan->suspend_time = cpu_to_le32(scan_suspend_time);
837 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
838 scan_suspend_time, interval);
839 }
840
841 if (priv->scan_request->n_ssids) {
842 int i, p = 0;
843 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
844 for (i = 0; i < priv->scan_request->n_ssids; i++) {
845 /* always does wildcard anyway */
846 if (!priv->scan_request->ssids[i].ssid_len)
847 continue;
848 scan->direct_scan[p].id = WLAN_EID_SSID;
849 scan->direct_scan[p].len =
850 priv->scan_request->ssids[i].ssid_len;
851 memcpy(scan->direct_scan[p].ssid,
852 priv->scan_request->ssids[i].ssid,
853 priv->scan_request->ssids[i].ssid_len);
854 n_probes++;
855 p++;
856 }
857 is_active = true;
858 } else
859 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
860
861 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
862 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
863 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
864
865 switch (priv->scan_band) {
866 case IEEE80211_BAND_2GHZ:
867 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
868 chan_mod = le32_to_cpu(
869 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
870 RXON_FLG_CHANNEL_MODE_MSK)
871 >> RXON_FLG_CHANNEL_MODE_POS;
872 if (chan_mod == CHANNEL_MODE_PURE_40) {
873 rate = IWL_RATE_6M_PLCP;
874 } else {
875 rate = IWL_RATE_1M_PLCP;
876 rate_flags = RATE_MCS_CCK_MSK;
877 }
878 break;
879 case IEEE80211_BAND_5GHZ:
880 rate = IWL_RATE_6M_PLCP;
881 break;
882 default:
883 IWL_WARN(priv, "Invalid scan band\n");
884 return -EIO;
885 }
886
887 /*
888 * If active scanning is requested but a certain channel is
889 * marked passive, we can do active scanning if we detect
890 * transmissions.
891 *
892 * There is an issue with some firmware versions that triggers
893 * a sysassert on a "good CRC threshold" of zero (== disabled),
894 * on a radar channel even though this means that we should NOT
895 * send probes.
896 *
897 * The "good CRC threshold" is the number of frames that we
898 * need to receive during our dwell time on a channel before
899 * sending out probes -- setting this to a huge value will
900 * mean we never reach it, but at the same time work around
901 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
902 * here instead of IWL_GOOD_CRC_TH_DISABLED.
903 */
904 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
905 IWL_GOOD_CRC_TH_NEVER;
906
907 band = priv->scan_band;
908
909 if (priv->cfg->scan_rx_antennas[band])
910 rx_ant = priv->cfg->scan_rx_antennas[band];
911
912 priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
913 priv->scan_tx_ant[band],
914 scan_tx_antennas);
915 rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
916 scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
917
918 /* In power save mode use one chain, otherwise use all chains */
919 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
920 /* rx_ant has been set to all valid chains previously */
921 active_chains = rx_ant &
922 ((u8)(priv->chain_noise_data.active_chains));
923 if (!active_chains)
924 active_chains = rx_ant;
925
926 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
927 priv->chain_noise_data.active_chains);
928
929 rx_ant = iwl4965_first_antenna(active_chains);
930 }
931
932 /* MIMO is not used here, but value is required */
933 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
934 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
935 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
936 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
937 scan->rx_chain = cpu_to_le16(rx_chain);
938
939 cmd_len = iwl_legacy_fill_probe_req(priv,
940 (struct ieee80211_mgmt *)scan->data,
941 vif->addr,
942 priv->scan_request->ie,
943 priv->scan_request->ie_len,
944 IWL_MAX_SCAN_SIZE - sizeof(*scan));
945 scan->tx_cmd.len = cpu_to_le16(cmd_len);
946
947 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
948 RXON_FILTER_BCON_AWARE_MSK);
949
950 scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
951 is_active, n_probes,
952 (void *)&scan->data[cmd_len]);
953 if (scan->channel_count == 0) {
954 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
955 return -EIO;
956 }
957
958 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
959 scan->channel_count * sizeof(struct iwl_scan_channel);
960 cmd.data = scan;
961 scan->len = cpu_to_le16(cmd.len);
962
963 set_bit(STATUS_SCAN_HW, &priv->status);
964
965 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
966 if (ret)
967 clear_bit(STATUS_SCAN_HW, &priv->status);
968
969 return ret;
970}
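
The suspend_time encoding used in the associated-scan path above (beacon-interval quotient shifted into the upper bits, remainder scaled by 1024 in the lower bits) is easier to follow with a worked example; the numbers are purely illustrative:

/*
 * suspend_time = 100, beacon interval = 100:
 *	extra             = (100 / 100) << 22  = 0x00400000
 *	remainder         = (100 % 100) * 1024 = 0
 *	scan_suspend_time = 0x00400000
 *
 * suspend_time = 100, beacon interval = 300:
 *	extra             = (100 / 300) << 22  = 0
 *	remainder         = (100 % 300) * 1024 = 102400 (0x19000)
 *	scan_suspend_time = 0x00019000
 */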
971
972int iwl4965_manage_ibss_station(struct iwl_priv *priv,
973 struct ieee80211_vif *vif, bool add)
974{
975 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
976
977 if (add)
978 return iwl4965_add_bssid_station(priv, vif_priv->ctx,
979 vif->bss_conf.bssid,
980 &vif_priv->ibss_bssid_sta_id);
981 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
982 vif->bss_conf.bssid);
983}
984
985void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
986 int sta_id, int tid, int freed)
987{
988 lockdep_assert_held(&priv->sta_lock);
989
990 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
991 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
992 else {
993 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
994 priv->stations[sta_id].tid[tid].tfds_in_queue,
995 freed);
996 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
997 }
998}
999
1000#define IWL_TX_QUEUE_MSK 0xfffff
1001
1002static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
1003{
1004 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1005 priv->current_ht_config.single_chain_sufficient;
1006}
1007
1008#define IWL_NUM_RX_CHAINS_MULTIPLE 3
1009#define IWL_NUM_RX_CHAINS_SINGLE 2
1010#define IWL_NUM_IDLE_CHAINS_DUAL 2
1011#define IWL_NUM_IDLE_CHAINS_SINGLE 1
1012
1013/*
1014 * Determine how many receiver/antenna chains to use.
1015 *
1016 * More provides better reception via diversity. Fewer saves power
1017 * at the expense of throughput, but only when not in powersave to
1018 * start with.
1019 *
1020 * MIMO (dual stream) requires at least 2, but works better with 3.
1021 * This does not determine *which* chains to use, just how many.
1022 */
1023static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
1024{
1025 /* # of Rx chains to use when expecting MIMO. */
1026 if (iwl4965_is_single_rx_stream(priv))
1027 return IWL_NUM_RX_CHAINS_SINGLE;
1028 else
1029 return IWL_NUM_RX_CHAINS_MULTIPLE;
1030}
1031
1032/*
1033 * When we are in power saving mode, unless the device supports spatial
1034 * multiplexing power save, use the active count for rx chain count.
1035 */
1036static int
1037iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1038{
1039 /* # Rx chains when idling, depending on SMPS mode */
1040 switch (priv->current_ht_config.smps) {
1041 case IEEE80211_SMPS_STATIC:
1042 case IEEE80211_SMPS_DYNAMIC:
1043 return IWL_NUM_IDLE_CHAINS_SINGLE;
1044 case IEEE80211_SMPS_OFF:
1045 return active_cnt;
1046 default:
1047 WARN(1, "invalid SMPS mode %d",
1048 priv->current_ht_config.smps);
1049 return active_cnt;
1050 }
1051}
1052
1053/* up to 4 chains */
1054static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
1055{
1056 u8 res;
1057 res = (chain_bitmap & BIT(0)) >> 0;
1058 res += (chain_bitmap & BIT(1)) >> 1;
1059 res += (chain_bitmap & BIT(2)) >> 2;
1060 res += (chain_bitmap & BIT(3)) >> 3;
1061 return res;
1062}
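
For instance, a hypothetical chain_bitmap of 0x5 (chains 0 and 2 present) yields:

/*
 * iwl4965_count_chain_bitmap(0x5)
 *	= (0x5 & BIT(0)) >> 0   ->  1
 *	+ (0x5 & BIT(1)) >> 1   ->  0
 *	+ (0x5 & BIT(2)) >> 2   ->  1
 *	+ (0x5 & BIT(3)) >> 3   ->  0
 *	= 2 chains
 */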
1063
1064/**
1065 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1066 *
1067 * Selects how many and which Rx receivers/antennas/chains to use.
1068 * This should not be used for the scan command ... it puts data in the wrong place.
1069 */
1070void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1071{
1072 bool is_single = iwl4965_is_single_rx_stream(priv);
1073 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
1074 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1075 u32 active_chains;
1076 u16 rx_chain;
1077
1078 /* Tell uCode which antennas are actually connected.
1079 * Before first association, we assume all antennas are connected.
1080 * Just after first association, iwl4965_chain_noise_calibration()
1081 * checks which antennas actually *are* connected. */
1082 if (priv->chain_noise_data.active_chains)
1083 active_chains = priv->chain_noise_data.active_chains;
1084 else
1085 active_chains = priv->hw_params.valid_rx_ant;
1086
1087 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1088
1089 /* How many receivers should we use? */
1090 active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
1091 idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
1092
1093
1094	/* correct rx chain count according to hw settings
1095 * and chain noise calibration
1096 */
1097 valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
1098 if (valid_rx_cnt < active_rx_cnt)
1099 active_rx_cnt = valid_rx_cnt;
1100
1101 if (valid_rx_cnt < idle_rx_cnt)
1102 idle_rx_cnt = valid_rx_cnt;
1103
1104 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1105 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1106
1107 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
1108
1109 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
1110 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1111 else
1112 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1113
1114 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
1115 ctx->staging.rx_chain,
1116 active_rx_cnt, idle_rx_cnt);
1117
1118 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1119 active_rx_cnt < idle_rx_cnt);
1120}
1121
1122u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1123{
1124 int i;
1125 u8 ind = ant;
1126
1127 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1128 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1129 if (valid & BIT(ind))
1130 return ind;
1131 }
1132 return ant;
1133}
1134
1135static const char *iwl4965_get_fh_string(int cmd)
1136{
1137 switch (cmd) {
1138 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1139 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1140 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
1141 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1142 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1143 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1144 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1145 IWL_CMD(FH_TSSR_TX_STATUS_REG);
1146 IWL_CMD(FH_TSSR_TX_ERROR_REG);
1147 default:
1148 return "UNKNOWN";
1149 }
1150}
1151
1152int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
1153{
1154 int i;
1155#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1156 int pos = 0;
1157 size_t bufsz = 0;
1158#endif
1159 static const u32 fh_tbl[] = {
1160 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1161 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1162 FH_RSCSR_CHNL0_WPTR,
1163 FH_MEM_RCSR_CHNL0_CONFIG_REG,
1164 FH_MEM_RSSR_SHARED_CTRL_REG,
1165 FH_MEM_RSSR_RX_STATUS_REG,
1166 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1167 FH_TSSR_TX_STATUS_REG,
1168 FH_TSSR_TX_ERROR_REG
1169 };
1170#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1171 if (display) {
1172 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1173 *buf = kmalloc(bufsz, GFP_KERNEL);
1174 if (!*buf)
1175 return -ENOMEM;
1176 pos += scnprintf(*buf + pos, bufsz - pos,
1177 "FH register values:\n");
1178 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1179 pos += scnprintf(*buf + pos, bufsz - pos,
1180 " %34s: 0X%08x\n",
1181 iwl4965_get_fh_string(fh_tbl[i]),
1182 iwl_legacy_read_direct32(priv, fh_tbl[i]));
1183 }
1184 return pos;
1185 }
1186#endif
1187 IWL_ERR(priv, "FH register values:\n");
1188 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1189 IWL_ERR(priv, " %34s: 0X%08x\n",
1190 iwl4965_get_fh_string(fh_tbl[i]),
1191 iwl_legacy_read_direct32(priv, fh_tbl[i]));
1192 }
1193 return 0;
1194}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
new file mode 100644
index 00000000000..9b65153bdd0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -0,0 +1,2872 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <linux/wireless.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "iwl-dev.h"
40#include "iwl-sta.h"
41#include "iwl-core.h"
42#include "iwl-4965.h"
43
44#define IWL4965_RS_NAME "iwl-4965-rs"
45
46#define NUM_TRY_BEFORE_ANT_TOGGLE 1
47#define IWL_NUMBER_TRY 1
48#define IWL_HT_NUMBER_TRY 3
49
50#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
51#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
52#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
53
54/* max allowed rate miss before sync LQ cmd */
55#define IWL_MISSED_RATE_MAX 15
56/* max time to accum history: 3 seconds */
57#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
58
59static u8 rs_ht_to_legacy[] = {
60 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
61 IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
62 IWL_RATE_6M_INDEX,
63 IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
64 IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
65 IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
66 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
67};
68
69static const u8 ant_toggle_lookup[] = {
70 /*ANT_NONE -> */ ANT_NONE,
71 /*ANT_A -> */ ANT_B,
72 /*ANT_B -> */ ANT_C,
73 /*ANT_AB -> */ ANT_BC,
74 /*ANT_C -> */ ANT_A,
75 /*ANT_AC -> */ ANT_AB,
76 /*ANT_BC -> */ ANT_AC,
77 /*ANT_ABC -> */ ANT_ABC,
78};
79
80#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
81 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
82 IWL_RATE_SISO_##s##M_PLCP, \
83 IWL_RATE_MIMO2_##s##M_PLCP,\
84 IWL_RATE_##r##M_IEEE, \
85 IWL_RATE_##ip##M_INDEX, \
86 IWL_RATE_##in##M_INDEX, \
87 IWL_RATE_##rp##M_INDEX, \
88 IWL_RATE_##rn##M_INDEX, \
89 IWL_RATE_##pp##M_INDEX, \
90 IWL_RATE_##np##M_INDEX }
91
92/*
93 * Parameter order:
94 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
95 *
96 * If there isn't a valid next or previous rate then INV is used which
97 * maps to IWL_RATE_INVALID
98 *
99 */
100const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
101 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
102 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
103 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
104 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
105 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
106 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
107 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
108 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
109 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
110 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
111 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
112 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
113 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
114};
115
116static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
117{
118 int idx = 0;
119
120 /* HT rate format */
121 if (rate_n_flags & RATE_MCS_HT_MSK) {
122 idx = (rate_n_flags & 0xff);
123
124 if (idx >= IWL_RATE_MIMO2_6M_PLCP)
125 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
126
127 idx += IWL_FIRST_OFDM_RATE;
128 /* skip 9M not supported in ht*/
129 if (idx >= IWL_RATE_9M_INDEX)
130 idx += 1;
131 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
132 return idx;
133
134 /* legacy rate format, search for match in table */
135 } else {
136 for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
137 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
138 return idx;
139 }
140
141 return -1;
142}
143
144static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
145 struct sk_buff *skb,
146 struct ieee80211_sta *sta,
147 struct iwl_lq_sta *lq_sta);
148static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
149 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
150static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
151 bool force_search);
152
153#ifdef CONFIG_MAC80211_DEBUGFS
154static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
155 u32 *rate_n_flags, int index);
156#else
157static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
158 u32 *rate_n_flags, int index)
159{}
160#endif
161
162/**
163 * The following tables contain the expected throughput metrics for all rates
164 *
165 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
166 *
167 * where invalid entries are zeros.
168 *
169 * CCK rates are only valid in legacy table and will only be used in G
170 * (2.4 GHz) band.
171 */
172
173static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
174 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
175};
176
177static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
178 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
179 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
180 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
181 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
182};
183
184static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
185 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
186 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
187 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
188 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
189};
190
191static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
192 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
193 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
194 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
195 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
196};
197
198static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
199 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
200 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
201 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
202 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
203};
204
205/* mbps, mcs */
206static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
207 { "1", "BPSK DSSS"},
208 { "2", "QPSK DSSS"},
209 {"5.5", "BPSK CCK"},
210 { "11", "QPSK CCK"},
211 { "6", "BPSK 1/2"},
212 { "9", "BPSK 1/2"},
213 { "12", "QPSK 1/2"},
214 { "18", "QPSK 3/4"},
215 { "24", "16QAM 1/2"},
216 { "36", "16QAM 3/4"},
217 { "48", "64QAM 2/3"},
218 { "54", "64QAM 3/4"},
219 { "60", "64QAM 5/6"},
220};
221
222#define MCS_INDEX_PER_STREAM (8)
223
224static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
225{
226 return (u8)(rate_n_flags & 0xFF);
227}
228
229static void
230iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
231{
232 window->data = 0;
233 window->success_counter = 0;
234 window->success_ratio = IWL_INVALID_VALUE;
235 window->counter = 0;
236 window->average_tpt = IWL_INVALID_VALUE;
237 window->stamp = 0;
238}
239
240static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
241{
242 return (ant_type & valid_antenna) == ant_type;
243}
244
245/*
246 * Removes old data from the statistics. All data older than
247 * TID_MAX_TIME_DIFF will be deleted.
248 */
249static void
250iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
251{
252 /* The oldest age we want to keep */
253 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
254
255 while (tl->queue_count &&
256 (tl->time_stamp < oldest_time)) {
257 tl->total -= tl->packet_count[tl->head];
258 tl->packet_count[tl->head] = 0;
259 tl->time_stamp += TID_QUEUE_CELL_SPACING;
260 tl->queue_count--;
261 tl->head++;
262 if (tl->head >= TID_QUEUE_MAX_SIZE)
263 tl->head = 0;
264 }
265}
266
267/*
268 * Increment the traffic load value for this tid and also remove
269 * any old values that fall outside the tracked time period.
270 */
271static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
272 struct ieee80211_hdr *hdr)
273{
274 u32 curr_time = jiffies_to_msecs(jiffies);
275 u32 time_diff;
276 s32 index;
277 struct iwl_traffic_load *tl = NULL;
278 u8 tid;
279
280 if (ieee80211_is_data_qos(hdr->frame_control)) {
281 u8 *qc = ieee80211_get_qos_ctl(hdr);
282 tid = qc[0] & 0xf;
283 } else
284 return MAX_TID_COUNT;
285
286 if (unlikely(tid >= TID_MAX_LOAD_COUNT))
287 return MAX_TID_COUNT;
288
289 tl = &lq_data->load[tid];
290
291 curr_time -= curr_time % TID_ROUND_VALUE;
292
293 /* Happens only for the first packet. Initialize the data */
294 if (!(tl->queue_count)) {
295 tl->total = 1;
296 tl->time_stamp = curr_time;
297 tl->queue_count = 1;
298 tl->head = 0;
299 tl->packet_count[0] = 1;
300 return MAX_TID_COUNT;
301 }
302
303 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
304 index = time_diff / TID_QUEUE_CELL_SPACING;
305
306 /* The history is too long: remove data that is older than */
307 /* TID_MAX_TIME_DIFF */
308 if (index >= TID_QUEUE_MAX_SIZE)
309 iwl4965_rs_tl_rm_old_stats(tl, curr_time);
310
311 index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
312 tl->packet_count[index] = tl->packet_count[index] + 1;
313 tl->total = tl->total + 1;
314
315 if ((index + 1) > tl->queue_count)
316 tl->queue_count = index + 1;
317
318 return tid;
319}
320
321/*
322 * Get the traffic load value for tid.
323 */
324static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
325{
326 u32 curr_time = jiffies_to_msecs(jiffies);
327 u32 time_diff;
328 s32 index;
329 struct iwl_traffic_load *tl = NULL;
330
331 if (tid >= TID_MAX_LOAD_COUNT)
332 return 0;
333
334 tl = &(lq_data->load[tid]);
335
336 curr_time -= curr_time % TID_ROUND_VALUE;
337
338 if (!(tl->queue_count))
339 return 0;
340
341 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
342 index = time_diff / TID_QUEUE_CELL_SPACING;
343
344 /* The history is too long: remove data that is older than */
345 /* TID_MAX_TIME_DIFF */
346 if (index >= TID_QUEUE_MAX_SIZE)
347 iwl4965_rs_tl_rm_old_stats(tl, curr_time);
348
349 return tl->total;
350}
351
352static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
353 struct iwl_lq_sta *lq_data, u8 tid,
354 struct ieee80211_sta *sta)
355{
356 int ret = -EAGAIN;
357 u32 load;
358
359 load = iwl4965_rs_tl_get_load(lq_data, tid);
360
361 if (load > IWL_AGG_LOAD_THRESHOLD) {
362 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
363 sta->addr, tid);
364 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
365 if (ret == -EAGAIN) {
366 /*
367			 * driver and mac80211 are out of sync;
368			 * this might be caused by reloading firmware,
369			 * so stop the tx ba session here
370 */
371 IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
372 tid);
373 ieee80211_stop_tx_ba_session(sta, tid);
374 }
375 } else {
376 IWL_ERR(priv, "Aggregation not enabled for tid %d "
377 "because load = %u\n", tid, load);
378 }
379 return ret;
380}
381
382static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
383 struct iwl_lq_sta *lq_data,
384 struct ieee80211_sta *sta)
385{
386 if (tid < TID_MAX_LOAD_COUNT)
387 iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
388 else
389 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
390 tid, TID_MAX_LOAD_COUNT);
391}
392
393static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
394{
395 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
397 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
398}
399
400/*
401 * Static function to get the expected throughput from an iwl_scale_tbl_info
402 * that wraps a NULL pointer check
403 */
404static s32
405iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
406{
407 if (tbl->expected_tpt)
408 return tbl->expected_tpt[rs_index];
409 return 0;
410}
411
412/**
413 * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
414 *
415 * We keep a sliding window of the last 62 packets transmitted
416 * at this rate. window->data contains the bitmask of successful
417 * packets.
418 */
419static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
420 int scale_index, int attempts, int successes)
421{
422 struct iwl_rate_scale_data *window = NULL;
423 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
424 s32 fail_count, tpt;
425
426 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
427 return -EINVAL;
428
429 /* Select window for current tx bit rate */
430 window = &(tbl->win[scale_index]);
431
432 /* Get expected throughput */
433 tpt = iwl4965_get_expected_tpt(tbl, scale_index);
434
435 /*
436 * Keep track of only the latest 62 tx frame attempts in this rate's
437 * history window; anything older isn't really relevant any more.
438 * If we have filled up the sliding window, drop the oldest attempt;
439 * if the oldest attempt (highest bit in bitmap) shows "success",
440 * subtract "1" from the success counter (this is the main reason
441 * we keep these bitmaps!).
442 */
443 while (attempts > 0) {
444 if (window->counter >= IWL_RATE_MAX_WINDOW) {
445
446 /* remove earliest */
447 window->counter = IWL_RATE_MAX_WINDOW - 1;
448
449 if (window->data & mask) {
450 window->data &= ~mask;
451 window->success_counter--;
452 }
453 }
454
455 /* Increment frames-attempted counter */
456 window->counter++;
457
458 /* Shift bitmap by one frame to throw away oldest history */
459 window->data <<= 1;
460
461 /* Mark the most recent #successes attempts as successful */
462 if (successes > 0) {
463 window->success_counter++;
464 window->data |= 0x1;
465 successes--;
466 }
467
468 attempts--;
469 }
470
471 /* Calculate current success ratio, avoid divide-by-0! */
472 if (window->counter > 0)
473 window->success_ratio = 128 * (100 * window->success_counter)
474 / window->counter;
475 else
476 window->success_ratio = IWL_INVALID_VALUE;
477
478 fail_count = window->counter - window->success_counter;
479
480 /* Calculate average throughput, if we have enough history. */
481 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
482 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
483 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
484 else
485 window->average_tpt = IWL_INVALID_VALUE;
486
487 /* Tag this window as having been updated */
488 window->stamp = jiffies;
489
490 return 0;
491}
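
/*
 * Worked example (sketch) of the fixed-point bookkeeping above: with a
 * full window of IWL_RATE_MAX_WINDOW = 62 attempts and 31 successes,
 *
 *	success_ratio = 128 * (100 * 31) / 62 = 6400	(i.e. 128 * 50%)
 *	average_tpt   = (6400 * tpt + 64) / 128 = 50 * tpt
 *
 * so both values carry a scale factor of 128, which keeps the math in
 * integers while rounding to the nearest unit.
 */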
492
493/*
494 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
495 */
496static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
497 struct iwl_scale_tbl_info *tbl,
498 int index, u8 use_green)
499{
500 u32 rate_n_flags = 0;
501
502 if (is_legacy(tbl->lq_type)) {
503 rate_n_flags = iwlegacy_rates[index].plcp;
504 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
505 rate_n_flags |= RATE_MCS_CCK_MSK;
506
507 } else if (is_Ht(tbl->lq_type)) {
508 if (index > IWL_LAST_OFDM_RATE) {
509 IWL_ERR(priv, "Invalid HT rate index %d\n", index);
510 index = IWL_LAST_OFDM_RATE;
511 }
512 rate_n_flags = RATE_MCS_HT_MSK;
513
514 if (is_siso(tbl->lq_type))
515 rate_n_flags |= iwlegacy_rates[index].plcp_siso;
516 else
517 rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
518 } else {
519 IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
520 }
521
522 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
523 RATE_MCS_ANT_ABC_MSK);
524
525 if (is_Ht(tbl->lq_type)) {
526 if (tbl->is_ht40) {
527 if (tbl->is_dup)
528 rate_n_flags |= RATE_MCS_DUP_MSK;
529 else
530 rate_n_flags |= RATE_MCS_HT40_MSK;
531 }
532 if (tbl->is_SGI)
533 rate_n_flags |= RATE_MCS_SGI_MSK;
534
535 if (use_green) {
536 rate_n_flags |= RATE_MCS_GF_MSK;
537 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
538 rate_n_flags &= ~RATE_MCS_SGI_MSK;
539 IWL_ERR(priv, "GF was set with SGI:SISO\n");
540 }
541 }
542 }
543 return rate_n_flags;
544}
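
/*
 * Composition sketch for the helper above (purely symbolic, no concrete
 * bit values assumed): a legacy CCK entry ends up as
 *
 *	plcp | RATE_MCS_CCK_MSK | (ant_type << RATE_MCS_ANT_POS)
 *
 * while an HT40 SISO entry with short guard interval becomes
 *
 *	RATE_MCS_HT_MSK | plcp_siso | RATE_MCS_HT40_MSK | RATE_MCS_SGI_MSK
 *		| (ant_type << RATE_MCS_ANT_POS)
 */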
545
546/*
547 * Interpret uCode API's rate_n_flags format,
548 * fill "search" or "active" tx mode table.
549 */
550static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
551 enum ieee80211_band band,
552 struct iwl_scale_tbl_info *tbl,
553 int *rate_idx)
554{
555 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
556 u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
557 u8 mcs;
558
559 memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
560 *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
561
562 if (*rate_idx == IWL_RATE_INVALID) {
563 *rate_idx = -1;
564 return -EINVAL;
565 }
566 tbl->is_SGI = 0; /* default legacy setup */
567 tbl->is_ht40 = 0;
568 tbl->is_dup = 0;
569 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
570 tbl->lq_type = LQ_NONE;
571 tbl->max_search = IWL_MAX_SEARCH;
572
573 /* legacy rate format */
574 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
575 if (iwl4965_num_of_ant == 1) {
576 if (band == IEEE80211_BAND_5GHZ)
577 tbl->lq_type = LQ_A;
578 else
579 tbl->lq_type = LQ_G;
580 }
581 /* HT rate format */
582 } else {
583 if (rate_n_flags & RATE_MCS_SGI_MSK)
584 tbl->is_SGI = 1;
585
586 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
587 (rate_n_flags & RATE_MCS_DUP_MSK))
588 tbl->is_ht40 = 1;
589
590 if (rate_n_flags & RATE_MCS_DUP_MSK)
591 tbl->is_dup = 1;
592
593 mcs = iwl4965_rs_extract_rate(rate_n_flags);
594
595 /* SISO */
596 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
597 if (iwl4965_num_of_ant == 1)
598 tbl->lq_type = LQ_SISO; /*else NONE*/
599 /* MIMO2 */
600 } else {
601 if (iwl4965_num_of_ant == 2)
602 tbl->lq_type = LQ_MIMO2;
603 }
604 }
605 return 0;
606}
607
608/* Switch to another antenna/antennas and return 1;
609 * if no other valid antenna is found, return 0. */
610static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
611 struct iwl_scale_tbl_info *tbl)
612{
613 u8 new_ant_type;
614
615 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
616 return 0;
617
618 if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
619 return 0;
620
621 new_ant_type = ant_toggle_lookup[tbl->ant_type];
622
623 while ((new_ant_type != tbl->ant_type) &&
624 !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
625 new_ant_type = ant_toggle_lookup[new_ant_type];
626
627 if (new_ant_type == tbl->ant_type)
628 return 0;
629
630 tbl->ant_type = new_ant_type;
631 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
632 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
633 return 1;
634}
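
/*
 * Illustrative walk-through (the actual ant_toggle_lookup[] contents are
 * defined elsewhere in this file; the cycle here is only an assumption
 * for the example): suppose the lookup cycled A -> B -> C -> A and
 * valid_ant allowed only A and C.  Starting from ant_type == A, the loop
 * would skip B (not valid) and stop at C, which is then written back
 * into both the table and the antenna bits of *rate_n_flags.
 */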
635
636/**
637 * iwl4965_rs_use_green - green-field mode is valid if the station
638 * supports it and there are no non-GF stations present in the BSS.
639 */
640static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
641{
642 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
643 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
644
645 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
646 !(ctx->ht.non_gf_sta_present);
647}
648
649/**
650 * iwl4965_rs_get_supported_rates - get the available rates
651 *
652 * For management or broadcast frames, only the basic
653 * available rates are returned.
654 *
655 */
656static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
657 struct ieee80211_hdr *hdr,
658 enum iwl_table_type rate_type)
659{
660 if (is_legacy(rate_type)) {
661 return lq_sta->active_legacy_rate;
662 } else {
663 if (is_siso(rate_type))
664 return lq_sta->active_siso_rate;
665 else
666 return lq_sta->active_mimo2_rate;
667 }
668}
669
670static u16
671iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
672 int rate_type)
673{
674 u8 high = IWL_RATE_INVALID;
675 u8 low = IWL_RATE_INVALID;
676
677 	/* 802.11a or HT walks to the next literally adjacent rate in
678 	 * the rate table */
679 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
680 int i;
681 u32 mask;
682
683 /* Find the previous rate that is in the rate mask */
684 i = index - 1;
685 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
686 if (rate_mask & mask) {
687 low = i;
688 break;
689 }
690 }
691
692 /* Find the next rate that is in the rate mask */
693 i = index + 1;
694 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
695 if (rate_mask & mask) {
696 high = i;
697 break;
698 }
699 }
700
701 return (high << 8) | low;
702 }
703
704 low = index;
705 while (low != IWL_RATE_INVALID) {
706 low = iwlegacy_rates[low].prev_rs;
707 if (low == IWL_RATE_INVALID)
708 break;
709 if (rate_mask & (1 << low))
710 break;
711 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
712 }
713
714 high = index;
715 while (high != IWL_RATE_INVALID) {
716 high = iwlegacy_rates[high].next_rs;
717 if (high == IWL_RATE_INVALID)
718 break;
719 if (rate_mask & (1 << high))
720 break;
721 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
722 }
723
724 return (high << 8) | low;
725}
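
/*
 * Unpacking sketch for the packed return value above: the two adjacent
 * rate indices share one u16, so callers split it as
 *
 *	low  = high_low & 0xff;
 *	high = (high_low >> 8) & 0xff;
 *
 * with IWL_RATE_INVALID in either byte meaning "no such neighbour".
 */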
726
727static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
728 struct iwl_scale_tbl_info *tbl,
729 u8 scale_index, u8 ht_possible)
730{
731 s32 low;
732 u16 rate_mask;
733 u16 high_low;
734 u8 switch_to_legacy = 0;
735 u8 is_green = lq_sta->is_green;
736 struct iwl_priv *priv = lq_sta->drv;
737
738 	/* Check if we need to switch from HT to legacy rates.
739 	 * The assumption is that mandatory rates (1 Mbps or 6 Mbps)
740 	 * are always supported (required by the spec). */
741 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
742 switch_to_legacy = 1;
743 scale_index = rs_ht_to_legacy[scale_index];
744 if (lq_sta->band == IEEE80211_BAND_5GHZ)
745 tbl->lq_type = LQ_A;
746 else
747 tbl->lq_type = LQ_G;
748
749 if (iwl4965_num_of_ant(tbl->ant_type) > 1)
750 tbl->ant_type =
751 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
752
753 tbl->is_ht40 = 0;
754 tbl->is_SGI = 0;
755 tbl->max_search = IWL_MAX_SEARCH;
756 }
757
758 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
759
760 /* Mask with station rate restriction */
761 if (is_legacy(tbl->lq_type)) {
762 /* supp_rates has no CCK bits in A mode */
763 if (lq_sta->band == IEEE80211_BAND_5GHZ)
764 rate_mask = (u16)(rate_mask &
765 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
766 else
767 rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
768 }
769
770 /* If we switched from HT to legacy, check current rate */
771 if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
772 low = scale_index;
773 goto out;
774 }
775
776 high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
777 scale_index, rate_mask,
778 tbl->lq_type);
779 low = high_low & 0xff;
780
781 if (low == IWL_RATE_INVALID)
782 low = scale_index;
783
784out:
785 return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
786}
787
788/*
789 * Simple function to compare two rate scale table types
790 */
791static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
792 struct iwl_scale_tbl_info *b)
793{
794 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
795 (a->is_SGI == b->is_SGI);
796}
797
798/*
799 * mac80211 sends us Tx status
800 */
801static void
802iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
803 struct ieee80211_sta *sta, void *priv_sta,
804 struct sk_buff *skb)
805{
806 int legacy_success;
807 int retries;
808 int rs_index, mac_index, i;
809 struct iwl_lq_sta *lq_sta = priv_sta;
810 struct iwl_link_quality_cmd *table;
811 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
812 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
813 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
814 enum mac80211_rate_control_flags mac_flags;
815 u32 tx_rate;
816 struct iwl_scale_tbl_info tbl_type;
817 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
818 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
819 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
820
821 IWL_DEBUG_RATE_LIMIT(priv,
822 "get frame ack response, update rate scale window\n");
823
824 	/* Treat uninitialized rate scaling data the same as non-existent. */
825 if (!lq_sta) {
826 IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
827 return;
828 } else if (!lq_sta->drv) {
829 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
830 return;
831 }
832
833 if (!ieee80211_is_data(hdr->frame_control) ||
834 info->flags & IEEE80211_TX_CTL_NO_ACK)
835 return;
836
837 /* This packet was aggregated but doesn't carry status info */
838 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
839 !(info->flags & IEEE80211_TX_STAT_AMPDU))
840 return;
841
842 /*
843 * Ignore this Tx frame response if its initial rate doesn't match
844 * that of latest Link Quality command. There may be stragglers
845 * from a previous Link Quality command, but we're no longer interested
846 * in those; they're either from the "active" mode while we're trying
847 * to check "search" mode, or a prior "search" mode after we've moved
848 * to a new "search" mode (which might become the new "active" mode).
849 */
850 table = &lq_sta->lq;
851 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
852 iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
853 priv->band, &tbl_type, &rs_index);
854 if (priv->band == IEEE80211_BAND_5GHZ)
855 rs_index -= IWL_FIRST_OFDM_RATE;
856 mac_flags = info->status.rates[0].flags;
857 mac_index = info->status.rates[0].idx;
858 /* For HT packets, map MCS to PLCP */
859 if (mac_flags & IEEE80211_TX_RC_MCS) {
860 mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
861 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
862 mac_index++;
863 /*
864 * mac80211 HT index is always zero-indexed; we need to move
865 * HT OFDM rates after CCK rates in 2.4 GHz band
866 */
867 if (priv->band == IEEE80211_BAND_2GHZ)
868 mac_index += IWL_FIRST_OFDM_RATE;
869 }
870 /* Here we actually compare this rate to the latest LQ command */
871 if ((mac_index < 0) ||
872 (tbl_type.is_SGI !=
873 !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
874 (tbl_type.is_ht40 !=
875 !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
876 (tbl_type.is_dup !=
877 !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
878 (tbl_type.ant_type != info->antenna_sel_tx) ||
879 (!!(tx_rate & RATE_MCS_HT_MSK) !=
880 !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
881 (!!(tx_rate & RATE_MCS_GF_MSK) !=
882 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
883 (rs_index != mac_index)) {
884 IWL_DEBUG_RATE(priv,
885 "initial rate %d does not match %d (0x%x)\n",
886 mac_index, rs_index, tx_rate);
887 /*
888 * Since rates mis-match, the last LQ command may have failed.
889 		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode
890 		 * with the driver.
891 */
892 lq_sta->missed_rate_counter++;
893 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
894 lq_sta->missed_rate_counter = 0;
895 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
896 CMD_ASYNC, false);
897 }
898 /* Regardless, ignore this status info for outdated rate */
899 return;
900 } else
901 /* Rate did match, so reset the missed_rate_counter */
902 lq_sta->missed_rate_counter = 0;
903
904 /* Figure out if rate scale algorithm is in active or search table */
905 if (iwl4965_table_type_matches(&tbl_type,
906 &(lq_sta->lq_info[lq_sta->active_tbl]))) {
907 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
908 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
909 } else if (iwl4965_table_type_matches(&tbl_type,
910 &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
911 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
912 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
913 } else {
914 IWL_DEBUG_RATE(priv,
915 "Neither active nor search matches tx rate\n");
916 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
917 IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
918 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
919 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
920 IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
921 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
922 IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
923 tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
924 /*
925 			 * No matching table found: bypass the data collection
926 			 * and continue rate scaling to find a valid rate table.
927 */
928 iwl4965_rs_stay_in_table(lq_sta, true);
929 goto done;
930 }
931
932 /*
933 * Updating the frame history depends on whether packets were
934 * aggregated.
935 *
936 	 * For aggregation, all packets were transmitted at the same rate,
937 	 * which is the first index into the rate scale table.
938 */
939 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
940 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
941 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
942 &rs_index);
943 iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
944 info->status.ampdu_len,
945 info->status.ampdu_ack_len);
946
947 /* Update success/fail counts if not searching for new mode */
948 if (lq_sta->stay_in_tbl) {
949 lq_sta->total_success += info->status.ampdu_ack_len;
950 lq_sta->total_failed += (info->status.ampdu_len -
951 info->status.ampdu_ack_len);
952 }
953 } else {
954 /*
955 		 * For legacy, update the frame history for each Tx retry.
956 */
957 retries = info->status.rates[0].count - 1;
958 /* HW doesn't send more than 15 retries */
959 retries = min(retries, 15);
960
961 /* The last transmission may have been successful */
962 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
963 /* Collect data for each rate used during failed TX attempts */
964 for (i = 0; i <= retries; ++i) {
965 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
966 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
967 &tbl_type, &rs_index);
968 /*
969 * Only collect stats if retried rate is in the same RS
970 * table as active/search.
971 */
972 if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
973 tmp_tbl = curr_tbl;
974 else if (iwl4965_table_type_matches(&tbl_type,
975 other_tbl))
976 tmp_tbl = other_tbl;
977 else
978 continue;
979 iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
980 i < retries ? 0 : legacy_success);
981 }
982
983 /* Update success/fail counts if not searching for new mode */
984 if (lq_sta->stay_in_tbl) {
985 lq_sta->total_success += legacy_success;
986 lq_sta->total_failed += retries + (1 - legacy_success);
987 }
988 }
989 /* The last TX rate is cached in lq_sta; it's set in if/else above */
990 lq_sta->last_rate_n_flags = tx_rate;
991done:
992 /* See if there's a better rate or modulation mode to try. */
993 if (sta && sta->supp_rates[sband->band])
994 iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
995}
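
/*
 * Worked example (sketch) for the legacy retry loop above: with
 * info->status.rates[0].count == 4 (so retries == 3) and a final ACK,
 * the history collection amounts to
 *
 *	i = 0..2: collect(rate_i, attempts = 1, successes = 0)
 *	i = 3:    collect(rate_3, attempts = 1, successes = 1)
 *
 * i.e. only the last transmission can be credited as a success, and
 * every earlier rate in the fallback chain records one failed attempt.
 */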
996
997/*
998 * Begin a period of staying with a selected modulation mode.
999 * Set "stay_in_tbl" flag to prevent any mode switches.
1000 * Set frame tx success limits according to legacy vs. high-throughput,
1001 * and reset overall (spanning all rates) tx success history statistics.
1002 * These control how long we stay using same modulation mode before
1003 * searching for a new mode.
1004 */
1005static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1006 struct iwl_lq_sta *lq_sta)
1007{
1008 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1009 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1010 if (is_legacy) {
1011 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1012 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1013 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1014 } else {
1015 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1016 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1017 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1018 }
1019 lq_sta->table_count = 0;
1020 lq_sta->total_failed = 0;
1021 lq_sta->total_success = 0;
1022 lq_sta->flush_timer = jiffies;
1023 lq_sta->action_counter = 0;
1024}
1025
1026/*
1027 * Find correct throughput table for given mode of modulation
1028 */
1029static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1030 struct iwl_scale_tbl_info *tbl)
1031{
1032 /* Used to choose among HT tables */
1033 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1034
1035 /* Check for invalid LQ type */
1036 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1037 tbl->expected_tpt = expected_tpt_legacy;
1038 return;
1039 }
1040
1041 /* Legacy rates have only one table */
1042 if (is_legacy(tbl->lq_type)) {
1043 tbl->expected_tpt = expected_tpt_legacy;
1044 return;
1045 }
1046
1047 /* Choose among many HT tables depending on number of streams
1048 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1049 * status */
1050 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1051 ht_tbl_pointer = expected_tpt_siso20MHz;
1052 else if (is_siso(tbl->lq_type))
1053 ht_tbl_pointer = expected_tpt_siso40MHz;
1054 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1055 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1056 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1057 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1058
1059 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1060 tbl->expected_tpt = ht_tbl_pointer[0];
1061 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1062 tbl->expected_tpt = ht_tbl_pointer[1];
1063 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1064 tbl->expected_tpt = ht_tbl_pointer[2];
1065 else /* AGG+SGI */
1066 tbl->expected_tpt = ht_tbl_pointer[3];
1067}
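
/*
 * Selection sketch: once the SISO/MIMO2 and 20/40 MHz table has been
 * chosen, the row picked from ht_tbl_pointer[] is effectively
 *
 *	row = (is_SGI ? 1 : 0) + (is_agg ? 2 : 0)
 *
 * i.e. 0 = normal, 1 = SGI, 2 = aggregation, 3 = aggregation + SGI,
 * exactly as spelled out by the if/else chain above.
 */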
1068
1069/*
1070 * Find starting rate for new "search" high-throughput mode of modulation.
1071 * Goal is to find lowest expected rate (under perfect conditions) that is
1072 * above the current measured throughput of "active" mode, to give new mode
1073 * a fair chance to prove itself without too many challenges.
1074 *
1075 * This gets called when transitioning to more aggressive modulation
1076 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1077 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1078 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1079 * bit rate will typically need to increase, but not if performance was bad.
1080 */
1081static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
1082 struct iwl_lq_sta *lq_sta,
1083 struct iwl_scale_tbl_info *tbl, /* "search" */
1084 u16 rate_mask, s8 index)
1085{
1086 /* "active" values */
1087 struct iwl_scale_tbl_info *active_tbl =
1088 &(lq_sta->lq_info[lq_sta->active_tbl]);
1089 s32 active_sr = active_tbl->win[index].success_ratio;
1090 s32 active_tpt = active_tbl->expected_tpt[index];
1091
1092 /* expected "search" throughput */
1093 s32 *tpt_tbl = tbl->expected_tpt;
1094
1095 s32 new_rate, high, low, start_hi;
1096 u16 high_low;
1097 s8 rate = index;
1098
1099 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1100
1101 for (; ;) {
1102 high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
1103 tbl->lq_type);
1104
1105 low = high_low & 0xff;
1106 high = (high_low >> 8) & 0xff;
1107
1108 /*
1109 * Lower the "search" bit rate, to give new "search" mode
1110 * approximately the same throughput as "active" if:
1111 *
1112 * 1) "Active" mode has been working modestly well (but not
1113 * great), and expected "search" throughput (under perfect
1114 * conditions) at candidate rate is above the actual
1115 * measured "active" throughput (but less than expected
1116 * "active" throughput under perfect conditions).
1117 * OR
1118 * 2) "Active" mode has been working perfectly or very well
1119 * and expected "search" throughput (under perfect
1120 * conditions) at candidate rate is above expected
1121 * "active" throughput (under perfect conditions).
1122 */
1123 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1124 ((active_sr > IWL_RATE_DECREASE_TH) &&
1125 (active_sr <= IWL_RATE_HIGH_TH) &&
1126 (tpt_tbl[rate] <= active_tpt))) ||
1127 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1128 (tpt_tbl[rate] > active_tpt))) {
1129
1130 /* (2nd or later pass)
1131 * If we've already tried to raise the rate, and are
1132 * now trying to lower it, use the higher rate. */
1133 if (start_hi != IWL_RATE_INVALID) {
1134 new_rate = start_hi;
1135 break;
1136 }
1137
1138 new_rate = rate;
1139
1140 /* Loop again with lower rate */
1141 if (low != IWL_RATE_INVALID)
1142 rate = low;
1143
1144 /* Lower rate not available, use the original */
1145 else
1146 break;
1147
1148 /* Else try to raise the "search" rate to match "active" */
1149 } else {
1150 /* (2nd or later pass)
1151 * If we've already tried to lower the rate, and are
1152 * now trying to raise it, use the lower rate. */
1153 if (new_rate != IWL_RATE_INVALID)
1154 break;
1155
1156 /* Loop again with higher rate */
1157 else if (high != IWL_RATE_INVALID) {
1158 start_hi = high;
1159 rate = high;
1160
1161 /* Higher rate not available, use the original */
1162 } else {
1163 new_rate = rate;
1164 break;
1165 }
1166 }
1167 }
1168
1169 return new_rate;
1170}
1171
1172/*
1173 * Set up search table for MIMO2
1174 */
1175static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
1176 struct iwl_lq_sta *lq_sta,
1177 struct ieee80211_conf *conf,
1178 struct ieee80211_sta *sta,
1179 struct iwl_scale_tbl_info *tbl, int index)
1180{
1181 u16 rate_mask;
1182 s32 rate;
1183 s8 is_green = lq_sta->is_green;
1184 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1185 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1186
1187 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1188 return -1;
1189
1190 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1191 == WLAN_HT_CAP_SM_PS_STATIC)
1192 return -1;
1193
1194 /* Need both Tx chains/antennas to support MIMO */
1195 if (priv->hw_params.tx_chains_num < 2)
1196 return -1;
1197
1198 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1199
1200 tbl->lq_type = LQ_MIMO2;
1201 tbl->is_dup = lq_sta->is_dup;
1202 tbl->action = 0;
1203 tbl->max_search = IWL_MAX_SEARCH;
1204 rate_mask = lq_sta->active_mimo2_rate;
1205
1206 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1207 tbl->is_ht40 = 1;
1208 else
1209 tbl->is_ht40 = 0;
1210
1211 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1212
1213 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1214
1215 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
1216 rate, rate_mask);
1217 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1218 IWL_DEBUG_RATE(priv,
1219 "Can't switch with index %d rate mask %x\n",
1220 rate, rate_mask);
1221 return -1;
1222 }
1223 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1224 tbl, rate, is_green);
1225
1226 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1227 tbl->current_rate, is_green);
1228 return 0;
1229}
1230
1231/*
1232 * Set up search table for SISO
1233 */
1234static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
1235 struct iwl_lq_sta *lq_sta,
1236 struct ieee80211_conf *conf,
1237 struct ieee80211_sta *sta,
1238 struct iwl_scale_tbl_info *tbl, int index)
1239{
1240 u16 rate_mask;
1241 u8 is_green = lq_sta->is_green;
1242 s32 rate;
1243 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1244 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1245
1246 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1247 return -1;
1248
1249 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1250
1251 tbl->is_dup = lq_sta->is_dup;
1252 tbl->lq_type = LQ_SISO;
1253 tbl->action = 0;
1254 tbl->max_search = IWL_MAX_SEARCH;
1255 rate_mask = lq_sta->active_siso_rate;
1256
1257 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1258 tbl->is_ht40 = 1;
1259 else
1260 tbl->is_ht40 = 0;
1261
1262 if (is_green)
1263 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1264
1265 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1266 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1267
1268 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1269 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1270 IWL_DEBUG_RATE(priv,
1271 "can not switch with index %d rate mask %x\n",
1272 rate, rate_mask);
1273 return -1;
1274 }
1275 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1276 tbl, rate, is_green);
1277 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1278 tbl->current_rate, is_green);
1279 return 0;
1280}
1281
1282/*
1283 * Try to switch to new modulation mode from legacy
1284 */
1285static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
1286 struct iwl_lq_sta *lq_sta,
1287 struct ieee80211_conf *conf,
1288 struct ieee80211_sta *sta,
1289 int index)
1290{
1291 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1292 struct iwl_scale_tbl_info *search_tbl =
1293 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1294 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1295 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1296 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1297 u8 start_action;
1298 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1299 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1300 int ret = 0;
1301 u8 update_search_tbl_counter = 0;
1302
1303 tbl->action = IWL_LEGACY_SWITCH_SISO;
1304
1305 start_action = tbl->action;
1306 for (; ;) {
1307 lq_sta->action_counter++;
1308 switch (tbl->action) {
1309 case IWL_LEGACY_SWITCH_ANTENNA1:
1310 case IWL_LEGACY_SWITCH_ANTENNA2:
1311 IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
1312
1313 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1314 tx_chains_num <= 1) ||
1315 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1316 tx_chains_num <= 2))
1317 break;
1318
1319 /* Don't change antenna if success has been great */
1320 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1321 break;
1322
1323 /* Set up search table to try other antenna */
1324 memcpy(search_tbl, tbl, sz);
1325
1326 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1327 &search_tbl->current_rate, search_tbl)) {
1328 update_search_tbl_counter = 1;
1329 iwl4965_rs_set_expected_tpt_table(lq_sta,
1330 search_tbl);
1331 goto out;
1332 }
1333 break;
1334 case IWL_LEGACY_SWITCH_SISO:
1335 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
1336
1337 /* Set up search table to try SISO */
1338 memcpy(search_tbl, tbl, sz);
1339 search_tbl->is_SGI = 0;
1340 ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
1341 search_tbl, index);
1342 if (!ret) {
1343 lq_sta->action_counter = 0;
1344 goto out;
1345 }
1346
1347 break;
1348 case IWL_LEGACY_SWITCH_MIMO2_AB:
1349 case IWL_LEGACY_SWITCH_MIMO2_AC:
1350 case IWL_LEGACY_SWITCH_MIMO2_BC:
1351 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
1352
1353 /* Set up search table to try MIMO */
1354 memcpy(search_tbl, tbl, sz);
1355 search_tbl->is_SGI = 0;
1356
1357 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1358 search_tbl->ant_type = ANT_AB;
1359 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1360 search_tbl->ant_type = ANT_AC;
1361 else
1362 search_tbl->ant_type = ANT_BC;
1363
1364 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1365 search_tbl->ant_type))
1366 break;
1367
1368 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1369 conf, sta,
1370 search_tbl, index);
1371 if (!ret) {
1372 lq_sta->action_counter = 0;
1373 goto out;
1374 }
1375 break;
1376 }
1377 tbl->action++;
1378 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1379 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1380
1381 if (tbl->action == start_action)
1382 break;
1383
1384 }
1385 search_tbl->lq_type = LQ_NONE;
1386 return 0;
1387
1388out:
1389 lq_sta->search_better_tbl = 1;
1390 tbl->action++;
1391 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1392 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1393 if (update_search_tbl_counter)
1394 search_tbl->action = tbl->action;
1395 return 0;
1396
1397}
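
/*
 * State-machine sketch for the search loop above (assuming the action
 * constants follow the order of the switch cases): tbl->action walks the
 * ring ANTENNA1 -> ANTENNA2 -> SISO -> MIMO2_AB -> MIMO2_AC -> MIMO2_BC
 * and wraps back to ANTENNA1.  The loop ends either when a candidate
 * search table has been set up (goto out, search_better_tbl = 1) or when
 * it has come full circle back to start_action without finding one.
 */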
1398
1399/*
1400 * Try to switch to new modulation mode from SISO
1401 */
1402static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
1403 struct iwl_lq_sta *lq_sta,
1404 struct ieee80211_conf *conf,
1405 struct ieee80211_sta *sta, int index)
1406{
1407 u8 is_green = lq_sta->is_green;
1408 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1409 struct iwl_scale_tbl_info *search_tbl =
1410 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1411 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1412 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1413 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1414 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1415 u8 start_action;
1416 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1417 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1418 u8 update_search_tbl_counter = 0;
1419 int ret;
1420
1421 start_action = tbl->action;
1422
1423 for (;;) {
1424 lq_sta->action_counter++;
1425 switch (tbl->action) {
1426 case IWL_SISO_SWITCH_ANTENNA1:
1427 case IWL_SISO_SWITCH_ANTENNA2:
1428 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1429 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1430 tx_chains_num <= 1) ||
1431 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1432 tx_chains_num <= 2))
1433 break;
1434
1435 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1436 break;
1437
1438 memcpy(search_tbl, tbl, sz);
1439 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1440 &search_tbl->current_rate, search_tbl)) {
1441 update_search_tbl_counter = 1;
1442 goto out;
1443 }
1444 break;
1445 case IWL_SISO_SWITCH_MIMO2_AB:
1446 case IWL_SISO_SWITCH_MIMO2_AC:
1447 case IWL_SISO_SWITCH_MIMO2_BC:
1448 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
1449 memcpy(search_tbl, tbl, sz);
1450 search_tbl->is_SGI = 0;
1451
1452 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1453 search_tbl->ant_type = ANT_AB;
1454 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1455 search_tbl->ant_type = ANT_AC;
1456 else
1457 search_tbl->ant_type = ANT_BC;
1458
1459 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1460 search_tbl->ant_type))
1461 break;
1462
1463 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1464 conf, sta,
1465 search_tbl, index);
1466 if (!ret)
1467 goto out;
1468 break;
1469 case IWL_SISO_SWITCH_GI:
1470 if (!tbl->is_ht40 && !(ht_cap->cap &
1471 IEEE80211_HT_CAP_SGI_20))
1472 break;
1473 if (tbl->is_ht40 && !(ht_cap->cap &
1474 IEEE80211_HT_CAP_SGI_40))
1475 break;
1476
1477 IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
1478
1479 memcpy(search_tbl, tbl, sz);
1480 if (is_green) {
1481 if (!tbl->is_SGI)
1482 break;
1483 else
1484 IWL_ERR(priv,
1485 "SGI was set in GF+SISO\n");
1486 }
1487 search_tbl->is_SGI = !tbl->is_SGI;
1488 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1489 if (tbl->is_SGI) {
1490 s32 tpt = lq_sta->last_tpt / 100;
1491 if (tpt >= search_tbl->expected_tpt[index])
1492 break;
1493 }
1494 search_tbl->current_rate =
1495 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1496 index, is_green);
1497 update_search_tbl_counter = 1;
1498 goto out;
1499 }
1500 tbl->action++;
1501 if (tbl->action > IWL_SISO_SWITCH_GI)
1502 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1503
1504 if (tbl->action == start_action)
1505 break;
1506 }
1507 search_tbl->lq_type = LQ_NONE;
1508 return 0;
1509
1510 out:
1511 lq_sta->search_better_tbl = 1;
1512 tbl->action++;
1513 if (tbl->action > IWL_SISO_SWITCH_GI)
1514 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1515 if (update_search_tbl_counter)
1516 search_tbl->action = tbl->action;
1517
1518 return 0;
1519}
1520
1521/*
1522 * Try to switch to new modulation mode from MIMO2
1523 */
1524static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
1525 struct iwl_lq_sta *lq_sta,
1526 struct ieee80211_conf *conf,
1527 struct ieee80211_sta *sta, int index)
1528{
1529 s8 is_green = lq_sta->is_green;
1530 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1531 struct iwl_scale_tbl_info *search_tbl =
1532 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1533 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1534 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1535 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1536 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1537 u8 start_action;
1538 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1539 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1540 u8 update_search_tbl_counter = 0;
1541 int ret;
1542
1543 start_action = tbl->action;
1544 for (;;) {
1545 lq_sta->action_counter++;
1546 switch (tbl->action) {
1547 case IWL_MIMO2_SWITCH_ANTENNA1:
1548 case IWL_MIMO2_SWITCH_ANTENNA2:
1549 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
1550
1551 if (tx_chains_num <= 2)
1552 break;
1553
1554 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1555 break;
1556
1557 memcpy(search_tbl, tbl, sz);
1558 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1559 &search_tbl->current_rate, search_tbl)) {
1560 update_search_tbl_counter = 1;
1561 goto out;
1562 }
1563 break;
1564 case IWL_MIMO2_SWITCH_SISO_A:
1565 case IWL_MIMO2_SWITCH_SISO_B:
1566 case IWL_MIMO2_SWITCH_SISO_C:
1567 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
1568
1569 /* Set up new search table for SISO */
1570 memcpy(search_tbl, tbl, sz);
1571
1572 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1573 search_tbl->ant_type = ANT_A;
1574 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1575 search_tbl->ant_type = ANT_B;
1576 else
1577 search_tbl->ant_type = ANT_C;
1578
1579 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1580 search_tbl->ant_type))
1581 break;
1582
1583 ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
1584 conf, sta,
1585 search_tbl, index);
1586 if (!ret)
1587 goto out;
1588
1589 break;
1590
1591 case IWL_MIMO2_SWITCH_GI:
1592 if (!tbl->is_ht40 && !(ht_cap->cap &
1593 IEEE80211_HT_CAP_SGI_20))
1594 break;
1595 if (tbl->is_ht40 && !(ht_cap->cap &
1596 IEEE80211_HT_CAP_SGI_40))
1597 break;
1598
1599 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
1600
1601 /* Set up new search table for MIMO2 */
1602 memcpy(search_tbl, tbl, sz);
1603 search_tbl->is_SGI = !tbl->is_SGI;
1604 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1605 /*
1606 * If active table already uses the fastest possible
1607 * modulation (dual stream with short guard interval),
1608 * and it's working well, there's no need to look
1609 * for a better type of modulation!
1610 */
1611 if (tbl->is_SGI) {
1612 s32 tpt = lq_sta->last_tpt / 100;
1613 if (tpt >= search_tbl->expected_tpt[index])
1614 break;
1615 }
1616 search_tbl->current_rate =
1617 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1618 index, is_green);
1619 update_search_tbl_counter = 1;
1620 goto out;
1621
1622 }
1623 tbl->action++;
1624 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1625 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1626
1627 if (tbl->action == start_action)
1628 break;
1629 }
1630 search_tbl->lq_type = LQ_NONE;
1631 return 0;
1632 out:
1633 lq_sta->search_better_tbl = 1;
1634 tbl->action++;
1635 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1636 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1637 if (update_search_tbl_counter)
1638 search_tbl->action = tbl->action;
1639
1640 return 0;
1641
1642}
1643
1644/*
1645 * Check whether we should continue using same modulation mode, or
1646 * begin search for a new mode, based on:
1647 * 1) # tx successes or failures while using this mode
1648 * 2) # times calling this function
1649 * 3) elapsed time in this mode (not used, for now)
1650 */
1651static void
1652iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1653{
1654 struct iwl_scale_tbl_info *tbl;
1655 int i;
1656 int active_tbl;
1657 int flush_interval_passed = 0;
1658 struct iwl_priv *priv;
1659
1660 priv = lq_sta->drv;
1661 active_tbl = lq_sta->active_tbl;
1662
1663 tbl = &(lq_sta->lq_info[active_tbl]);
1664
1665 /* If we've been disallowing search, see if we should now allow it */
1666 if (lq_sta->stay_in_tbl) {
1667
1668 /* Elapsed time using current modulation mode */
1669 if (lq_sta->flush_timer)
1670 flush_interval_passed =
1671 time_after(jiffies,
1672 (unsigned long)(lq_sta->flush_timer +
1673 IWL_RATE_SCALE_FLUSH_INTVL));
1674
1675 /*
1676 * Check if we should allow search for new modulation mode.
1677 * If many frames have failed or succeeded, or we've used
1678 * this same modulation for a long time, allow search, and
1679 * reset history stats that keep track of whether we should
1680 * allow a new search. Also (below) reset all bitmaps and
1681 * stats in active history.
1682 */
1683 if (force_search ||
1684 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1685 (lq_sta->total_success > lq_sta->max_success_limit) ||
1686 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1687 && (flush_interval_passed))) {
1688 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
1689 lq_sta->total_failed,
1690 lq_sta->total_success,
1691 flush_interval_passed);
1692
1693 /* Allow search for new mode */
1694 lq_sta->stay_in_tbl = 0; /* only place reset */
1695 lq_sta->total_failed = 0;
1696 lq_sta->total_success = 0;
1697 lq_sta->flush_timer = 0;
1698
1699 /*
1700 		 * Else if we've used this modulation mode for enough repetitions
1701 * (regardless of elapsed time or success/failure), reset
1702 * history bitmaps and rate-specific stats for all rates in
1703 * active table.
1704 */
1705 } else {
1706 lq_sta->table_count++;
1707 if (lq_sta->table_count >=
1708 lq_sta->table_count_limit) {
1709 lq_sta->table_count = 0;
1710
1711 IWL_DEBUG_RATE(priv,
1712 "LQ: stay in table clear win\n");
1713 for (i = 0; i < IWL_RATE_COUNT; i++)
1714 iwl4965_rs_rate_scale_clear_window(
1715 &(tbl->win[i]));
1716 }
1717 }
1718
1719 /* If transitioning to allow "search", reset all history
1720 * bitmaps and stats in active table (this will become the new
1721 * "search" table). */
1722 if (!lq_sta->stay_in_tbl) {
1723 for (i = 0; i < IWL_RATE_COUNT; i++)
1724 iwl4965_rs_rate_scale_clear_window(
1725 &(tbl->win[i]));
1726 }
1727 }
1728}
1729
1730/*
1731 * setup rate table in uCode
1732 * return rate_n_flags as used in the table
1733 */
1734static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
1735 struct iwl_rxon_context *ctx,
1736 struct iwl_lq_sta *lq_sta,
1737 struct iwl_scale_tbl_info *tbl,
1738 int index, u8 is_green)
1739{
1740 u32 rate;
1741
1742 /* Update uCode's rate table. */
1743 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
1744 iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
1745 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1746
1747 return rate;
1748}
1749
1750/*
1751 * Do rate scaling and search for new modulation mode.
1752 */
1753static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
1754 struct sk_buff *skb,
1755 struct ieee80211_sta *sta,
1756 struct iwl_lq_sta *lq_sta)
1757{
1758 struct ieee80211_hw *hw = priv->hw;
1759 struct ieee80211_conf *conf = &hw->conf;
1760 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1761 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1762 int low = IWL_RATE_INVALID;
1763 int high = IWL_RATE_INVALID;
1764 int index;
1765 int i;
1766 struct iwl_rate_scale_data *window = NULL;
1767 int current_tpt = IWL_INVALID_VALUE;
1768 int low_tpt = IWL_INVALID_VALUE;
1769 int high_tpt = IWL_INVALID_VALUE;
1770 u32 fail_count;
1771 s8 scale_action = 0;
1772 u16 rate_mask;
1773 u8 update_lq = 0;
1774 struct iwl_scale_tbl_info *tbl, *tbl1;
1775 u16 rate_scale_index_msk = 0;
1776 u32 rate;
1777 u8 is_green = 0;
1778 u8 active_tbl = 0;
1779 u8 done_search = 0;
1780 u16 high_low;
1781 s32 sr;
1782 u8 tid = MAX_TID_COUNT;
1783 struct iwl_tid_data *tid_data;
1784 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1785 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1786
1787 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1788
1789 /* Send management frames and NO_ACK data using lowest rate. */
1790 	/* TODO: this could probably be improved. */
1791 if (!ieee80211_is_data(hdr->frame_control) ||
1792 info->flags & IEEE80211_TX_CTL_NO_ACK)
1793 return;
1794
1795 if (!sta || !lq_sta)
1796 return;
1797
1798 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1799
1800 tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
1801 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1802 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
1803 if (tid_data->agg.state == IWL_AGG_OFF)
1804 lq_sta->is_agg = 0;
1805 else
1806 lq_sta->is_agg = 1;
1807 } else
1808 lq_sta->is_agg = 0;
1809
1810 /*
1811 * Select rate-scale / modulation-mode table to work with in
1812 * the rest of this function: "search" if searching for better
1813 * modulation mode, or "active" if doing rate scaling within a mode.
1814 */
1815 if (!lq_sta->search_better_tbl)
1816 active_tbl = lq_sta->active_tbl;
1817 else
1818 active_tbl = 1 - lq_sta->active_tbl;
1819
1820 tbl = &(lq_sta->lq_info[active_tbl]);
1821 if (is_legacy(tbl->lq_type))
1822 lq_sta->is_green = 0;
1823 else
1824 lq_sta->is_green = iwl4965_rs_use_green(sta);
1825 is_green = lq_sta->is_green;
1826
1827 /* current tx rate */
1828 index = lq_sta->last_txrate_idx;
1829
1830 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
1831 tbl->lq_type);
1832
1833 /* rates available for this association, and for modulation mode */
1834 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1835
1836 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
1837
1838 /* mask with station rate restriction */
1839 if (is_legacy(tbl->lq_type)) {
1840 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1841 /* supp_rates has no CCK bits in A mode */
1842 rate_scale_index_msk = (u16) (rate_mask &
1843 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
1844 else
1845 rate_scale_index_msk = (u16) (rate_mask &
1846 lq_sta->supp_rates);
1847
1848 } else
1849 rate_scale_index_msk = rate_mask;
1850
1851 if (!rate_scale_index_msk)
1852 rate_scale_index_msk = rate_mask;
1853
1854 if (!((1 << index) & rate_scale_index_msk)) {
1855 IWL_ERR(priv, "Current Rate is not valid\n");
1856 if (lq_sta->search_better_tbl) {
1857 /* revert to active table if search table is not valid*/
1858 tbl->lq_type = LQ_NONE;
1859 lq_sta->search_better_tbl = 0;
1860 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1861 /* get "active" rate info */
1862 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1863 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
1864 tbl, index, is_green);
1865 }
1866 return;
1867 }
1868
1869 /* Get expected throughput table and history window for current rate */
1870 if (!tbl->expected_tpt) {
1871 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
1872 return;
1873 }
1874
1875 /* force user max rate if set by user */
1876 if ((lq_sta->max_rate_idx != -1) &&
1877 (lq_sta->max_rate_idx < index)) {
1878 index = lq_sta->max_rate_idx;
1879 update_lq = 1;
1880 window = &(tbl->win[index]);
1881 goto lq_update;
1882 }
1883
1884 window = &(tbl->win[index]);
1885
1886 /*
1887 * If there is not enough history to calculate actual average
1888 * throughput, keep analyzing results of more tx frames, without
1889 * changing rate or mode (bypass most of the rest of this function).
1890 * Set up new rate table in uCode only if old rate is not supported
1891 * in current association (use new rate found above).
1892 */
1893 fail_count = window->counter - window->success_counter;
1894 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1895 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1896 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
1897 "for index %d\n",
1898 window->success_counter, window->counter, index);
1899
1900 /* Can't calculate this yet; not enough history */
1901 window->average_tpt = IWL_INVALID_VALUE;
1902
1903 /* Should we stay with this modulation mode,
1904 * or search for a new one? */
1905 iwl4965_rs_stay_in_table(lq_sta, false);
1906
1907 goto out;
1908 }
1909 /* Else we have enough samples; calculate estimate of
1910 * actual average throughput */
1911 if (window->average_tpt != ((window->success_ratio *
1912 tbl->expected_tpt[index] + 64) / 128)) {
1913 IWL_ERR(priv,
1914 "expected_tpt should have been calculated by now\n");
1915 window->average_tpt = ((window->success_ratio *
1916 tbl->expected_tpt[index] + 64) / 128);
1917 }
1918
1919 /* If we are searching for better modulation mode, check success. */
1920 if (lq_sta->search_better_tbl) {
1921 /* If good success, continue using the "search" mode;
1922 * no need to send new link quality command, since we're
1923 * continuing to use the setup that we've been trying. */
1924 if (window->average_tpt > lq_sta->last_tpt) {
1925
1926 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
1927 "suc=%d cur-tpt=%d old-tpt=%d\n",
1928 window->success_ratio,
1929 window->average_tpt,
1930 lq_sta->last_tpt);
1931
1932 if (!is_legacy(tbl->lq_type))
1933 lq_sta->enable_counter = 1;
1934
1935 /* Swap tables; "search" becomes "active" */
1936 lq_sta->active_tbl = active_tbl;
1937 current_tpt = window->average_tpt;
1938
1939 /* Else poor success; go back to mode in "active" table */
1940 } else {
1941
1942 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
1943 "suc=%d cur-tpt=%d old-tpt=%d\n",
1944 window->success_ratio,
1945 window->average_tpt,
1946 lq_sta->last_tpt);
1947
1948 /* Nullify "search" table */
1949 tbl->lq_type = LQ_NONE;
1950
1951 /* Revert to "active" table */
1952 active_tbl = lq_sta->active_tbl;
1953 tbl = &(lq_sta->lq_info[active_tbl]);
1954
1955 /* Revert to "active" rate and throughput info */
1956 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1957 current_tpt = lq_sta->last_tpt;
1958
1959 /* Need to set up a new rate table in uCode */
1960 update_lq = 1;
1961 }
1962
1963 /* Either way, we've made a decision; modulation mode
1964 * search is done, allow rate adjustment next time. */
1965 lq_sta->search_better_tbl = 0;
1966 done_search = 1; /* Don't switch modes below! */
1967 goto lq_update;
1968 }
1969
1970 /* (Else) not in search of better modulation mode, try for better
1971 * starting rate, while staying in this mode. */
1972 high_low = iwl4965_rs_get_adjacent_rate(priv, index,
1973 rate_scale_index_msk,
1974 tbl->lq_type);
1975 low = high_low & 0xff;
1976 high = (high_low >> 8) & 0xff;
1977
1978 	/* If the user set a max rate, don't allow rates above that constraint */
1979 if ((lq_sta->max_rate_idx != -1) &&
1980 (lq_sta->max_rate_idx < high))
1981 high = IWL_RATE_INVALID;
1982
1983 sr = window->success_ratio;
1984
1985 /* Collect measured throughputs for current and adjacent rates */
1986 current_tpt = window->average_tpt;
1987 if (low != IWL_RATE_INVALID)
1988 low_tpt = tbl->win[low].average_tpt;
1989 if (high != IWL_RATE_INVALID)
1990 high_tpt = tbl->win[high].average_tpt;
1991
1992 scale_action = 0;
1993
1994 /* Too many failures, decrease rate */
1995 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1996 IWL_DEBUG_RATE(priv,
1997 "decrease rate because of low success_ratio\n");
1998 scale_action = -1;
1999
2000 	/* No throughput measured yet for adjacent rates; try increasing. */
2001 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2002 (high_tpt == IWL_INVALID_VALUE)) {
2003
2004 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2005 scale_action = 1;
2006 else if (low != IWL_RATE_INVALID)
2007 scale_action = 0;
2008 }
2009
2010 /* Both adjacent throughputs are measured, but neither one has better
2011 * throughput; we're using the best rate, don't change it! */
2012 else if ((low_tpt != IWL_INVALID_VALUE) &&
2013 (high_tpt != IWL_INVALID_VALUE) &&
2014 (low_tpt < current_tpt) &&
2015 (high_tpt < current_tpt))
2016 scale_action = 0;
2017
2018 /* At least one adjacent rate's throughput is measured,
2019 * and may have better performance. */
2020 else {
2021 /* Higher adjacent rate's throughput is measured */
2022 if (high_tpt != IWL_INVALID_VALUE) {
2023 /* Higher rate has better throughput */
2024 if (high_tpt > current_tpt &&
2025 sr >= IWL_RATE_INCREASE_TH) {
2026 scale_action = 1;
2027 } else {
2028 scale_action = 0;
2029 }
2030
2031 /* Lower adjacent rate's throughput is measured */
2032 } else if (low_tpt != IWL_INVALID_VALUE) {
2033 /* Lower rate has better throughput */
2034 if (low_tpt > current_tpt) {
2035 IWL_DEBUG_RATE(priv,
2036 "decrease rate because of low tpt\n");
2037 scale_action = -1;
2038 } else if (sr >= IWL_RATE_INCREASE_TH) {
2039 scale_action = 1;
2040 }
2041 }
2042 }
2043
2044 /* Sanity check; asked for decrease, but success rate or throughput
2045 * has been good at old rate. Don't change it. */
2046 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2047 ((sr > IWL_RATE_HIGH_TH) ||
2048 (current_tpt > (100 * tbl->expected_tpt[low]))))
2049 scale_action = 0;
2050
2051 switch (scale_action) {
2052 case -1:
2053 /* Decrease starting rate, update uCode's rate table */
2054 if (low != IWL_RATE_INVALID) {
2055 update_lq = 1;
2056 index = low;
2057 }
2058
2059 break;
2060 case 1:
2061 /* Increase starting rate, update uCode's rate table */
2062 if (high != IWL_RATE_INVALID) {
2063 update_lq = 1;
2064 index = high;
2065 }
2066
2067 break;
2068 case 0:
2069 /* No change */
2070 default:
2071 break;
2072 }
2073
2074 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2075 "high %d type %d\n",
2076 index, scale_action, low, high, tbl->lq_type);
2077
2078lq_update:
2079 /* Replace uCode's rate table for the destination station. */
2080 if (update_lq)
2081 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
2082 tbl, index, is_green);
2083
2084 /* Should we stay with this modulation mode,
2085 * or search for a new one? */
2086 iwl4965_rs_stay_in_table(lq_sta, false);
2087
2088 /*
2089 * Search for new modulation mode if we're:
2090 * 1) Not changing rates right now
2091 * 2) Not just finishing up a search
2092 * 3) Allowing a new search
2093 */
2094 if (!update_lq && !done_search &&
2095 !lq_sta->stay_in_tbl && window->counter) {
2096 /* Save current throughput to compare with "search" throughput*/
2097 lq_sta->last_tpt = current_tpt;
2098
2099 /* Select a new "search" modulation mode to try.
2100 * If one is found, set up the new "search" table. */
2101 if (is_legacy(tbl->lq_type))
2102 iwl4965_rs_move_legacy_other(priv, lq_sta,
2103 conf, sta, index);
2104 else if (is_siso(tbl->lq_type))
2105 iwl4965_rs_move_siso_to_other(priv, lq_sta,
2106 conf, sta, index);
2107 else /* (is_mimo2(tbl->lq_type)) */
2108 iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
2109 conf, sta, index);
2110
2111 /* If new "search" mode was selected, set up in uCode table */
2112 if (lq_sta->search_better_tbl) {
2113 /* Access the "search" table, clear its history. */
2114 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2115 for (i = 0; i < IWL_RATE_COUNT; i++)
2116 iwl4965_rs_rate_scale_clear_window(
2117 &(tbl->win[i]));
2118
2119 /* Use new "search" start rate */
2120 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
2121
2122 IWL_DEBUG_RATE(priv,
2123 "Switch current mcs: %X index: %d\n",
2124 tbl->current_rate, index);
2125 iwl4965_rs_fill_link_cmd(priv, lq_sta,
2126 tbl->current_rate);
2127 iwl_legacy_send_lq_cmd(priv, ctx,
2128 &lq_sta->lq, CMD_ASYNC, false);
2129 } else
2130 done_search = 1;
2131 }
2132
2133 if (done_search && !lq_sta->stay_in_tbl) {
2134 /* If the "active" (non-search) mode was legacy,
2135 * and we've tried switching antennas,
2136 * but we haven't been able to try HT modes (not available),
2137 * stay with best antenna legacy modulation for a while
2138 * before next round of mode comparisons. */
2139 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2140 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2141 lq_sta->action_counter > tbl1->max_search) {
2142 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2143 iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
2144 }
2145
2146 /* If we're in an HT mode, and all 3 mode switch actions
2147 * have been tried and compared, stay in this best modulation
2148 * mode for a while before next round of mode comparisons. */
2149 if (lq_sta->enable_counter &&
2150 (lq_sta->action_counter >= tbl1->max_search)) {
2151 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2152 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2153 (tid != MAX_TID_COUNT)) {
2154 tid_data =
2155 &priv->stations[lq_sta->lq.sta_id].tid[tid];
2156 if (tid_data->agg.state == IWL_AGG_OFF) {
2157 IWL_DEBUG_RATE(priv,
2158 "try to aggregate tid %d\n",
2159 tid);
2160 iwl4965_rs_tl_turn_on_agg(priv, tid,
2161 lq_sta, sta);
2162 }
2163 }
2164 iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
2165 }
2166 }
2167
2168out:
2169 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
2170 index, is_green);
2171 i = index;
2172 lq_sta->last_txrate_idx = i;
2173}
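
/*
 * Decision summary (sketch) of the scale_action logic above:
 *
 *	sr <= IWL_RATE_DECREASE_TH or current_tpt == 0     -> -1 (lower)
 *	no neighbour tpt known, sr high, higher rate valid -> +1 (probe up)
 *	both neighbours measured but both worse            ->  0 (stay)
 *	higher neighbour better and sr high enough         -> +1
 *	lower neighbour better                             -> -1
 *
 * followed by the sanity check that cancels a decrease while the current
 * rate is still performing well.
 */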
2174
2175/**
2176 * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
2177 *
2178 * The uCode's station table contains a table of fallback rates
2179 * for automatic fallback during transmission.
2180 *
2181 * NOTE: This sets up a default set of values. These will be replaced later
2182 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2183 * rc80211_simple.
2184 *
2185 * NOTE: Run the REPLY_ADD_STA command to set up the station table entry
2186 * before calling this function (which runs REPLY_TX_LINK_QUALITY_CMD
2187 * and requires the station table entry to exist).
2188 */
2189static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
2190 struct ieee80211_conf *conf,
2191 struct ieee80211_sta *sta,
2192 struct iwl_lq_sta *lq_sta)
2193{
2194 struct iwl_scale_tbl_info *tbl;
2195 int rate_idx;
2196 int i;
2197 u32 rate;
2198 u8 use_green = iwl4965_rs_use_green(sta);
2199 u8 active_tbl = 0;
2200 u8 valid_tx_ant;
2201 struct iwl_station_priv *sta_priv;
2202 struct iwl_rxon_context *ctx;
2203
2204 if (!sta || !lq_sta)
2205 return;
2206
2207 sta_priv = (void *)sta->drv_priv;
2208 ctx = sta_priv->common.ctx;
2209
2210 i = lq_sta->last_txrate_idx;
2211
2212 valid_tx_ant = priv->hw_params.valid_tx_ant;
2213
2214 if (!lq_sta->search_better_tbl)
2215 active_tbl = lq_sta->active_tbl;
2216 else
2217 active_tbl = 1 - lq_sta->active_tbl;
2218
2219 tbl = &(lq_sta->lq_info[active_tbl]);
2220
2221 if ((i < 0) || (i >= IWL_RATE_COUNT))
2222 i = 0;
2223
2224 rate = iwlegacy_rates[i].plcp;
2225 tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
2226 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2227
2228 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2229 rate |= RATE_MCS_CCK_MSK;
2230
2231 iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2232 if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2233 iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2234
2235 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2236 tbl->current_rate = rate;
2237 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
2238 iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
2239 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2240 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2241}
2242
2243static void
2244iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2245 struct ieee80211_tx_rate_control *txrc)
2246{
2247
2248 struct sk_buff *skb = txrc->skb;
2249 struct ieee80211_supported_band *sband = txrc->sband;
2250 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
2251 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2252 struct iwl_lq_sta *lq_sta = priv_sta;
2253 int rate_idx;
2254
2255 IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
2256
2257 /* Get max rate if user set max rate */
2258 if (lq_sta) {
2259 lq_sta->max_rate_idx = txrc->max_rate_idx;
2260 if ((sband->band == IEEE80211_BAND_5GHZ) &&
2261 (lq_sta->max_rate_idx != -1))
2262 lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
2263 if ((lq_sta->max_rate_idx < 0) ||
2264 (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
2265 lq_sta->max_rate_idx = -1;
2266 }
2267
2268 /* Treat uninitialized rate scaling data same as non-existing. */
2269 if (lq_sta && !lq_sta->drv) {
2270 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
2271 priv_sta = NULL;
2272 }
2273
2274 /* Send management frames and NO_ACK data using lowest rate. */
2275 if (rate_control_send_low(sta, priv_sta, txrc))
2276 return;
2277
2278 if (!lq_sta)
2279 return;
2280
2281 rate_idx = lq_sta->last_txrate_idx;
2282
2283 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2284 rate_idx -= IWL_FIRST_OFDM_RATE;
2285 /* 6M and 9M share the same MCS index */
2286 rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
2287 if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
2288 IWL_RATE_MIMO2_6M_PLCP)
2289 rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
2290 info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
2291 if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
2292 info->control.rates[0].flags |=
2293 IEEE80211_TX_RC_SHORT_GI;
2294 if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
2295 info->control.rates[0].flags |=
2296 IEEE80211_TX_RC_DUP_DATA;
2297 if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
2298 info->control.rates[0].flags |=
2299 IEEE80211_TX_RC_40_MHZ_WIDTH;
2300 if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
2301 info->control.rates[0].flags |=
2302 IEEE80211_TX_RC_GREEN_FIELD;
2303 } else {
2304 /* Check for invalid rates */
2305 if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
2306 ((sband->band == IEEE80211_BAND_5GHZ) &&
2307 (rate_idx < IWL_FIRST_OFDM_RATE)))
2308 rate_idx = rate_lowest_index(sband, sta);
2309 /* On valid 5 GHz rate, adjust index */
2310 else if (sband->band == IEEE80211_BAND_5GHZ)
2311 rate_idx -= IWL_FIRST_OFDM_RATE;
2312 info->control.rates[0].flags = 0;
2313 }
2314 info->control.rates[0].idx = rate_idx;
2315
2316}
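The HT branch above compresses the driver's rate index into a mac80211 MCS index. A minimal sketch of that mapping in isolation (the helper name is made up for illustration; the constants are the driver's own):

static int example_driver_idx_to_mcs(int rate_idx, bool is_mimo2)
{
	/* drop the CCK rates that precede the OFDM/HT block */
	int mcs = rate_idx - IWL_FIRST_OFDM_RATE;

	/* 6M and 9M share MCS 0, so everything above 6M shifts down by one */
	mcs = (mcs > 0) ? (mcs - 1) : 0;

	/* a second spatial stream occupies the next MCS_INDEX_PER_STREAM slots */
	if (is_mimo2)
		mcs += MCS_INDEX_PER_STREAM;

	return mcs;
}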
2317
2318static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2319 gfp_t gfp)
2320{
2321 struct iwl_lq_sta *lq_sta;
2322 struct iwl_station_priv *sta_priv =
2323 (struct iwl_station_priv *) sta->drv_priv;
2324 struct iwl_priv *priv;
2325
2326 priv = (struct iwl_priv *)priv_rate;
2327 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2328
2329 lq_sta = &sta_priv->lq_sta;
2330
2331 return lq_sta;
2332}
2333
2334/*
2335 * Called after adding a new station to initialize rate scaling
2336 */
2337void
2338iwl4965_rs_rate_init(struct iwl_priv *priv,
2339 struct ieee80211_sta *sta,
2340 u8 sta_id)
2341{
2342 int i, j;
2343 struct ieee80211_hw *hw = priv->hw;
2344 struct ieee80211_conf *conf = &priv->hw->conf;
2345 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2346 struct iwl_station_priv *sta_priv;
2347 struct iwl_lq_sta *lq_sta;
2348 struct ieee80211_supported_band *sband;
2349
2350 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2351 lq_sta = &sta_priv->lq_sta;
2352 sband = hw->wiphy->bands[conf->channel->band];
2353
2354
2355 lq_sta->lq.sta_id = sta_id;
2356
2357 for (j = 0; j < LQ_SIZE; j++)
2358 for (i = 0; i < IWL_RATE_COUNT; i++)
2359 iwl4965_rs_rate_scale_clear_window(
2360 &lq_sta->lq_info[j].win[i]);
2361
2362 lq_sta->flush_timer = 0;
2363 lq_sta->supp_rates = sta->supp_rates[sband->band];
2364 for (j = 0; j < LQ_SIZE; j++)
2365 for (i = 0; i < IWL_RATE_COUNT; i++)
2366 iwl4965_rs_rate_scale_clear_window(
2367 &lq_sta->lq_info[j].win[i]);
2368
2369 IWL_DEBUG_RATE(priv, "LQ:"
2370 "*** rate scale station global init for station %d ***\n",
2371 sta_id);
2372 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2373 * the lowest or the highest rate.. Could consider using RSSI from
2374 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2375 * after assoc.. */
2376
2377 lq_sta->is_dup = 0;
2378 lq_sta->max_rate_idx = -1;
2379 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2380 lq_sta->is_green = iwl4965_rs_use_green(sta);
2381 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2382 lq_sta->band = priv->band;
2383 /*
2384 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2385 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2386 */
2387 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2388 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2389 lq_sta->active_siso_rate &= ~((u16)0x2);
2390 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
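	/*
	 * Worked example (annotation, not part of the patch): with
	 * ht_cap->mcs.rx_mask[0] == 0xFF (MCS 0-7 supported) and
	 * IWL_FIRST_OFDM_RATE == 4, the four steps above give
	 *
	 *	0xFF << 1          = 0x1FE
	 *	0x1FE | (0xFF & 1) = 0x1FF
	 *	0x1FF & ~0x2       = 0x1FD   (clear what becomes the 9 Mbps slot)
	 *	0x1FD << 4         = 0x1FD0  (6-60 Mbps, no 9 Mbps, no CCK)
	 *
	 * which is exactly the 0x1FD0 mask hard-coded in the debugfs
	 * scale_table write handler later in this file.
	 */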
2391
2392 /* Same here */
2393 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2394 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2395 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2396 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2397
2398 /* These values will be overridden later */
2399 lq_sta->lq.general_params.single_stream_ant_msk =
2400 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2401 lq_sta->lq.general_params.dual_stream_ant_msk =
2402 priv->hw_params.valid_tx_ant &
2403 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2404 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2405 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2406 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2407 lq_sta->lq.general_params.dual_stream_ant_msk =
2408 priv->hw_params.valid_tx_ant;
2409 }
2410
2411 /* as default allow aggregation for all tids */
2412 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2413 lq_sta->drv = priv;
2414
2415 /* Set last_txrate_idx to lowest rate */
2416 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2417 if (sband->band == IEEE80211_BAND_5GHZ)
2418 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2419 lq_sta->is_agg = 0;
2420
2421#ifdef CONFIG_MAC80211_DEBUGFS
2422 lq_sta->dbg_fixed_rate = 0;
2423#endif
2424
2425 iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
2426}
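A rough sketch of the call ordering required by the REPLY_ADD_STA note above iwl4965_rs_initialize_lq(): the station table entry must exist before the LQ command that rate init ends up sending. The wrapper name below is illustrative only; the two calls are the ones used elsewhere in this series.

static int example_setup_station_rs(struct iwl_priv *priv,
				    struct iwl_rxon_context *ctx,
				    struct ieee80211_sta *sta, const u8 *addr)
{
	u8 sta_id;
	int ret;

	/* 1) REPLY_ADD_STA: create the uCode station table entry first */
	ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, sta, &sta_id);
	if (ret)
		return ret;

	/* 2) only now may REPLY_TX_LINK_QUALITY_CMD be sent for sta_id,
	 *    which iwl4965_rs_rate_init() does via iwl4965_rs_initialize_lq() */
	iwl4965_rs_rate_init(priv, sta, sta_id);
	return 0;
}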
2427
2428static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
2429 struct iwl_lq_sta *lq_sta, u32 new_rate)
2430{
2431 struct iwl_scale_tbl_info tbl_type;
2432 int index = 0;
2433 int rate_idx;
2434 int repeat_rate = 0;
2435 u8 ant_toggle_cnt = 0;
2436 u8 use_ht_possible = 1;
2437 u8 valid_tx_ant = 0;
2438 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
2439
2440 /* Override starting rate (index 0) if needed for debug purposes */
2441 iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2442
2443 /* Interpret new_rate (rate_n_flags) */
2444 iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2445 &tbl_type, &rate_idx);
2446
2447 /* How many times should we repeat the initial rate? */
2448 if (is_legacy(tbl_type.lq_type)) {
2449 ant_toggle_cnt = 1;
2450 repeat_rate = IWL_NUMBER_TRY;
2451 } else {
2452 repeat_rate = IWL_HT_NUMBER_TRY;
2453 }
2454
2455 lq_cmd->general_params.mimo_delimiter =
2456 is_mimo(tbl_type.lq_type) ? 1 : 0;
2457
2458 /* Fill 1st table entry (index 0) */
2459 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2460
2461 if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
2462 lq_cmd->general_params.single_stream_ant_msk =
2463 tbl_type.ant_type;
2464 } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
2465 lq_cmd->general_params.dual_stream_ant_msk =
2466 tbl_type.ant_type;
2467 } /* otherwise we don't modify the existing value */
2468
2469 index++;
2470 repeat_rate--;
2471 if (priv)
2472 valid_tx_ant = priv->hw_params.valid_tx_ant;
2473
2474 /* Fill rest of rate table */
2475 while (index < LINK_QUAL_MAX_RETRY_NUM) {
2476 /* Repeat initial/next rate.
2477 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
2478 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
2479 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
2480 if (is_legacy(tbl_type.lq_type)) {
2481 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2482 ant_toggle_cnt++;
2483 else if (priv &&
2484 iwl4965_rs_toggle_antenna(valid_tx_ant,
2485 &new_rate, &tbl_type))
2486 ant_toggle_cnt = 1;
2487 }
2488
2489 /* Override next rate if needed for debug purposes */
2490 iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2491
2492 /* Fill next table entry */
2493 lq_cmd->rs_table[index].rate_n_flags =
2494 cpu_to_le32(new_rate);
2495 repeat_rate--;
2496 index++;
2497 }
2498
2499 iwl4965_rs_get_tbl_info_from_mcs(new_rate,
2500 lq_sta->band, &tbl_type,
2501 &rate_idx);
2502
2503 /* Indicate to uCode which entries might be MIMO.
2504 * If initial rate was MIMO, this will finally end up
2505 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
2506 if (is_mimo(tbl_type.lq_type))
2507 lq_cmd->general_params.mimo_delimiter = index;
2508
2509 /* Get next rate */
2510 new_rate = iwl4965_rs_get_lower_rate(lq_sta,
2511 &tbl_type, rate_idx,
2512 use_ht_possible);
2513
2514 /* How many times should we repeat the next rate? */
2515 if (is_legacy(tbl_type.lq_type)) {
2516 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2517 ant_toggle_cnt++;
2518 else if (priv &&
2519 iwl4965_rs_toggle_antenna(valid_tx_ant,
2520 &new_rate, &tbl_type))
2521 ant_toggle_cnt = 1;
2522
2523 repeat_rate = IWL_NUMBER_TRY;
2524 } else {
2525 repeat_rate = IWL_HT_NUMBER_TRY;
2526 }
2527
2528 /* Don't allow HT rates after next pass.
2529 * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
2530 use_ht_possible = 0;
2531
2532 /* Override next rate if needed for debug purposes */
2533 iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2534
2535 /* Fill next table entry */
2536 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2537
2538 index++;
2539 repeat_rate--;
2540 }
2541
2542 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2543 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2544
2545 lq_cmd->agg_params.agg_time_limit =
2546 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2547}
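The rs_table filled above is the per-station fallback ladder described in the iwl4965_rs_initialize_lq() comment: entry 0 carries the start rate and later entries hold progressively lower rates. A simplified sketch of how such a ladder is indexed after repeated failures (the helper and the linear-walk model are illustrative assumptions, not taken from the uCode):

static u32 example_nth_fallback_rate(const struct iwl_link_quality_cmd *lq,
				     unsigned int failed_attempts)
{
	unsigned int idx = failed_attempts;

	if (idx >= LINK_QUAL_MAX_RETRY_NUM)
		idx = LINK_QUAL_MAX_RETRY_NUM - 1;

	/* rate_n_flags is stored little-endian, as in the fill loop above */
	return le32_to_cpu(lq->rs_table[idx].rate_n_flags);
}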
2548
2549static void *
2550iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2551{
2552 return hw->priv;
2553}
2554/* rate scale requires free function to be implemented */
2555static void iwl4965_rs_free(void *priv_rate)
2556{
2557 return;
2558}
2559
2560static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2561 void *priv_sta)
2562{
2563 struct iwl_priv *priv __maybe_unused = priv_r;
2564
2565 IWL_DEBUG_RATE(priv, "enter\n");
2566 IWL_DEBUG_RATE(priv, "leave\n");
2567}
2568
2569
2570#ifdef CONFIG_MAC80211_DEBUGFS
2571static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
2572{
2573 file->private_data = inode->i_private;
2574 return 0;
2575}
2576static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2577 u32 *rate_n_flags, int index)
2578{
2579 struct iwl_priv *priv;
2580 u8 valid_tx_ant;
2581 u8 ant_sel_tx;
2582
2583 priv = lq_sta->drv;
2584 valid_tx_ant = priv->hw_params.valid_tx_ant;
2585 if (lq_sta->dbg_fixed_rate) {
2586 ant_sel_tx =
2587 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
2588 >> RATE_MCS_ANT_POS);
2589 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2590 *rate_n_flags = lq_sta->dbg_fixed_rate;
2591 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
2592 } else {
2593 lq_sta->dbg_fixed_rate = 0;
2594 IWL_ERR(priv,
2595 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
2596 ant_sel_tx, valid_tx_ant);
2597 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2598 }
2599 } else {
2600 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2601 }
2602}
2603
2604static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
2605 const char __user *user_buf, size_t count, loff_t *ppos)
2606{
2607 struct iwl_lq_sta *lq_sta = file->private_data;
2608 struct iwl_priv *priv;
2609 char buf[64];
2610 size_t buf_size;
2611 u32 parsed_rate;
2612 struct iwl_station_priv *sta_priv =
2613 container_of(lq_sta, struct iwl_station_priv, lq_sta);
2614 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
2615
2616 priv = lq_sta->drv;
2617 memset(buf, 0, sizeof(buf));
2618 buf_size = min(count, sizeof(buf) - 1);
2619 if (copy_from_user(buf, user_buf, buf_size))
2620 return -EFAULT;
2621
2622 if (sscanf(buf, "%x", &parsed_rate) == 1)
2623 lq_sta->dbg_fixed_rate = parsed_rate;
2624 else
2625 lq_sta->dbg_fixed_rate = 0;
2626
2627 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2628 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2629 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2630
2631 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
2632 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2633
2634 if (lq_sta->dbg_fixed_rate) {
2635 iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2636 iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
2637 false);
2638 }
2639
2640 return count;
2641}
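Usage sketch from userspace, assuming the usual mac80211 per-station debugfs layout (the path below is a guess for illustration, not defined by this patch): the handler accepts a rate_n_flags value in hex, and writing 0 turns the fixed rate back off.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* hypothetical path -- phy, netdev and station MAC depend on the system */
#define SCALE_TABLE "/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/" \
		    "stations/00:11:22:33:44:55/rate_scale_table"

static int set_fixed_rate(const char *hex_rate_n_flags)
{
	int fd = open(SCALE_TABLE, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* a hex rate_n_flags word; writing "0" disables the fixed rate */
	n = write(fd, hex_rate_n_flags, strlen(hex_rate_n_flags));
	close(fd);
	return n < 0 ? -1 : 0;
}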
2642
2643static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
2644 char __user *user_buf, size_t count, loff_t *ppos)
2645{
2646 char *buff;
2647 int desc = 0;
2648 int i = 0;
2649 int index = 0;
2650 ssize_t ret;
2651
2652 struct iwl_lq_sta *lq_sta = file->private_data;
2653 struct iwl_priv *priv;
2654 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2655
2656 priv = lq_sta->drv;
2657 buff = kmalloc(1024, GFP_KERNEL);
2658 if (!buff)
2659 return -ENOMEM;
2660
2661 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2662 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
2663 lq_sta->total_failed, lq_sta->total_success,
2664 lq_sta->active_legacy_rate);
2665 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2666 lq_sta->dbg_fixed_rate);
2667 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
2668 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
2669 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
2670 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
2671 desc += sprintf(buff+desc, "lq type %s\n",
2672 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
2673 if (is_Ht(tbl->lq_type)) {
2674 desc += sprintf(buff+desc, " %s",
2675 (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
2676 desc += sprintf(buff+desc, " %s",
2677 (tbl->is_ht40) ? "40MHz" : "20MHz");
2678 desc += sprintf(buff+desc, " %s %s %s\n",
2679 (tbl->is_SGI) ? "SGI" : "",
2680 (lq_sta->is_green) ? "GF enabled" : "",
2681 (lq_sta->is_agg) ? "AGG on" : "");
2682 }
2683 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
2684 lq_sta->last_rate_n_flags);
2685 desc += sprintf(buff+desc, "general:"
2686 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
2687 lq_sta->lq.general_params.flags,
2688 lq_sta->lq.general_params.mimo_delimiter,
2689 lq_sta->lq.general_params.single_stream_ant_msk,
2690 lq_sta->lq.general_params.dual_stream_ant_msk);
2691
2692 desc += sprintf(buff+desc, "agg:"
2693 "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
2694 le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
2695 lq_sta->lq.agg_params.agg_dis_start_th,
2696 lq_sta->lq.agg_params.agg_frame_cnt_limit);
2697
2698 desc += sprintf(buff+desc,
2699 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
2700 lq_sta->lq.general_params.start_rate_index[0],
2701 lq_sta->lq.general_params.start_rate_index[1],
2702 lq_sta->lq.general_params.start_rate_index[2],
2703 lq_sta->lq.general_params.start_rate_index[3]);
2704
2705 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2706 index = iwl4965_hwrate_to_plcp_idx(
2707 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
2708 if (is_legacy(tbl->lq_type)) {
2709 desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
2710 i,
2711 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2712 iwl_rate_mcs[index].mbps);
2713 } else {
2714 desc += sprintf(buff+desc,
2715 " rate[%d] 0x%X %smbps (%s)\n",
2716 i,
2717 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2718 iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
2719 }
2720 }
2721
2722 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2723 kfree(buff);
2724 return ret;
2725}
2726
2727static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
2728 .write = iwl4965_rs_sta_dbgfs_scale_table_write,
2729 .read = iwl4965_rs_sta_dbgfs_scale_table_read,
2730 .open = iwl4965_open_file_generic,
2731 .llseek = default_llseek,
2732};
2733static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
2734 char __user *user_buf, size_t count, loff_t *ppos)
2735{
2736 char *buff;
2737 int desc = 0;
2738 int i, j;
2739 ssize_t ret;
2740
2741 struct iwl_lq_sta *lq_sta = file->private_data;
2742
2743 buff = kmalloc(1024, GFP_KERNEL);
2744 if (!buff)
2745 return -ENOMEM;
2746
2747 for (i = 0; i < LQ_SIZE; i++) {
2748 desc += sprintf(buff+desc,
2749 "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
2750 "rate=0x%X\n",
2751 lq_sta->active_tbl == i ? "*" : "x",
2752 lq_sta->lq_info[i].lq_type,
2753 lq_sta->lq_info[i].is_SGI,
2754 lq_sta->lq_info[i].is_ht40,
2755 lq_sta->lq_info[i].is_dup,
2756 lq_sta->is_green,
2757 lq_sta->lq_info[i].current_rate);
2758 for (j = 0; j < IWL_RATE_COUNT; j++) {
2759 desc += sprintf(buff+desc,
2760 "counter=%d success=%d %%=%d\n",
2761 lq_sta->lq_info[i].win[j].counter,
2762 lq_sta->lq_info[i].win[j].success_counter,
2763 lq_sta->lq_info[i].win[j].success_ratio);
2764 }
2765 }
2766 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2767 kfree(buff);
2768 return ret;
2769}
2770
2771static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2772 .read = iwl4965_rs_sta_dbgfs_stats_table_read,
2773 .open = iwl4965_open_file_generic,
2774 .llseek = default_llseek,
2775};
2776
2777static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2778 char __user *user_buf, size_t count, loff_t *ppos)
2779{
2780 char buff[120];
2781 int desc = 0;
2782 ssize_t ret;
2783
2784 struct iwl_lq_sta *lq_sta = file->private_data;
2785 struct iwl_priv *priv;
2786 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2787
2788 priv = lq_sta->drv;
2789
2790 if (is_Ht(tbl->lq_type))
2791 desc += sprintf(buff+desc,
2792 "Bit Rate= %d Mb/s\n",
2793 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2794 else
2795 desc += sprintf(buff+desc,
2796 "Bit Rate= %d Mb/s\n",
2797 iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
2798
2799 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2800 return ret;
2801}
2802
2803static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
2804 .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
2805 .open = iwl4965_open_file_generic,
2806 .llseek = default_llseek,
2807};
2808
2809static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
2810 struct dentry *dir)
2811{
2812 struct iwl_lq_sta *lq_sta = priv_sta;
2813 lq_sta->rs_sta_dbgfs_scale_table_file =
2814 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
2815 lq_sta, &rs_sta_dbgfs_scale_table_ops);
2816 lq_sta->rs_sta_dbgfs_stats_table_file =
2817 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
2818 lq_sta, &rs_sta_dbgfs_stats_table_ops);
2819 lq_sta->rs_sta_dbgfs_rate_scale_data_file =
2820 debugfs_create_file("rate_scale_data", S_IRUSR, dir,
2821 lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
2822 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
2823 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
2824 &lq_sta->tx_agg_tid_en);
2825
2826}
2827
2828static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
2829{
2830 struct iwl_lq_sta *lq_sta = priv_sta;
2831 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2832 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2833 debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
2834 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
2835}
2836#endif
2837
2838/*
2839 * Initialization of rate scaling information is done by driver after
2840 * the station is added. Since mac80211 calls this function before a
2841 * station is added we ignore it.
2842 */
2843static void
2844iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
2845 struct ieee80211_sta *sta, void *priv_sta)
2846{
2847}
2848static struct rate_control_ops rs_4965_ops = {
2849 .module = NULL,
2850 .name = IWL4965_RS_NAME,
2851 .tx_status = iwl4965_rs_tx_status,
2852 .get_rate = iwl4965_rs_get_rate,
2853 .rate_init = iwl4965_rs_rate_init_stub,
2854 .alloc = iwl4965_rs_alloc,
2855 .free = iwl4965_rs_free,
2856 .alloc_sta = iwl4965_rs_alloc_sta,
2857 .free_sta = iwl4965_rs_free_sta,
2858#ifdef CONFIG_MAC80211_DEBUGFS
2859 .add_sta_debugfs = iwl4965_rs_add_debugfs,
2860 .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
2861#endif
2862};
2863
2864int iwl4965_rate_control_register(void)
2865{
2866 return ieee80211_rate_control_register(&rs_4965_ops);
2867}
2868
2869void iwl4965_rate_control_unregister(void)
2870{
2871 ieee80211_rate_control_unregister(&rs_4965_ops);
2872}
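A sketch of how this register/unregister pair is expected to be used from the driver's module init/exit paths (the function names here are illustrative placeholders, not from this patch):

static int __init example_driver_init(void)
{
	int ret;

	ret = iwl4965_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	/* ... register the PCI driver, etc. ... */
	return 0;
}

static void __exit example_driver_exit(void)
{
	/* ... unregister the PCI driver ... */
	iwl4965_rate_control_unregister();
}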
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
new file mode 100644
index 00000000000..2b144bbfc3c
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
@@ -0,0 +1,215 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-4965-calib.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41#include "iwl-4965-hw.h"
42#include "iwl-4965.h"
43
44void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
45 struct iwl_rx_mem_buffer *rxb)
46
47{
48 struct iwl_rx_packet *pkt = rxb_addr(rxb);
49 struct iwl_missed_beacon_notif *missed_beacon;
50
51 missed_beacon = &pkt->u.missed_beacon;
52 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
53 priv->missed_beacon_threshold) {
54 IWL_DEBUG_CALIB(priv,
55 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
56 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
57 le32_to_cpu(missed_beacon->total_missed_becons),
58 le32_to_cpu(missed_beacon->num_recvd_beacons),
59 le32_to_cpu(missed_beacon->num_expected_beacons));
60 if (!test_bit(STATUS_SCANNING, &priv->status))
61 iwl4965_init_sensitivity(priv);
62 }
63}
64
65/* Calculate noise level, based on measurements during network silence just
66 * before the beacon arrives. This measurement can be done only if we know
67 * exactly when to expect beacons, therefore only when we're associated. */
68static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
69{
70 struct statistics_rx_non_phy *rx_info;
71 int num_active_rx = 0;
72 int total_silence = 0;
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise;
75
76 rx_info = &(priv->_4965.statistics.rx.general);
77 bcn_silence_a =
78 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
79 bcn_silence_b =
80 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
81 bcn_silence_c =
82 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
83
84 if (bcn_silence_a) {
85 total_silence += bcn_silence_a;
86 num_active_rx++;
87 }
88 if (bcn_silence_b) {
89 total_silence += bcn_silence_b;
90 num_active_rx++;
91 }
92 if (bcn_silence_c) {
93 total_silence += bcn_silence_c;
94 num_active_rx++;
95 }
96
97 /* Average among active antennas */
98 if (num_active_rx)
99 last_rx_noise = (total_silence / num_active_rx) - 107;
100 else
101 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
102
103 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
104 bcn_silence_a, bcn_silence_b, bcn_silence_c,
105 last_rx_noise);
106}
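Worked example for the averaging above (illustrative values, not measured data):

/*
 * With beacon-silence readings of a = 40, b = 36 and c = 0 (third chain
 * idle), only two chains are averaged:
 *
 *	last_rx_noise = (40 + 36) / 2 - 107 = -69 dBm
 *
 * If all three readings were zero, the result falls back to
 * IWL_NOISE_MEAS_NOT_AVAILABLE.
 */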
107
108#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
109/*
110 * Based on the assumption that all statistics counters are DWORDs.
111 * FIXME: this function is for debugging only and does not handle
112 * counter roll-over.
113 */
114static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
115 __le32 *stats)
116{
117 int i, size;
118 __le32 *prev_stats;
119 u32 *accum_stats;
120 u32 *delta, *max_delta;
121 struct statistics_general_common *general, *accum_general;
122 struct statistics_tx *tx, *accum_tx;
123
124 prev_stats = (__le32 *)&priv->_4965.statistics;
125 accum_stats = (u32 *)&priv->_4965.accum_statistics;
126 size = sizeof(struct iwl_notif_statistics);
127 general = &priv->_4965.statistics.general.common;
128 accum_general = &priv->_4965.accum_statistics.general.common;
129 tx = &priv->_4965.statistics.tx;
130 accum_tx = &priv->_4965.accum_statistics.tx;
131 delta = (u32 *)&priv->_4965.delta_statistics;
132 max_delta = (u32 *)&priv->_4965.max_delta;
133
134 for (i = sizeof(__le32); i < size;
135 i += sizeof(__le32), stats++, prev_stats++, delta++,
136 max_delta++, accum_stats++) {
137 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
138 *delta = (le32_to_cpu(*stats) -
139 le32_to_cpu(*prev_stats));
140 *accum_stats += *delta;
141 if (*delta > *max_delta)
142 *max_delta = *delta;
143 }
144 }
145
146 /* reset accumulative statistics for "no-counter" type statistics */
147 accum_general->temperature = general->temperature;
148 accum_general->ttl_timestamp = general->ttl_timestamp;
149}
150#endif
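A minimal, self-contained sketch of the walk-the-struct-as-__le32 technique used by iwl4965_accumulative_statistics() above, applied to a hypothetical all-counter struct (the driver additionally skips the leading flag word and tracks per-field maxima):

struct example_counters {
	__le32 rx_ok;
	__le32 rx_err;
	__le32 tx_ok;
};

static void example_accumulate(const struct example_counters *cur,
			       struct example_counters *prev, u32 *accum)
{
	const __le32 *c = (const __le32 *)cur;
	__le32 *p = (__le32 *)prev;
	int i, n = sizeof(*cur) / sizeof(__le32);

	for (i = 0; i < n; i++) {
		/* like the driver, ignore apparent roll-over (cur < prev) */
		if (le32_to_cpu(c[i]) > le32_to_cpu(p[i]))
			accum[i] += le32_to_cpu(c[i]) - le32_to_cpu(p[i]);
	}
	*prev = *cur;
}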
151
152#define REG_RECALIB_PERIOD (60)
153
154void iwl4965_rx_statistics(struct iwl_priv *priv,
155 struct iwl_rx_mem_buffer *rxb)
156{
157 int change;
158 struct iwl_rx_packet *pkt = rxb_addr(rxb);
159
160 IWL_DEBUG_RX(priv,
161 "Statistics notification received (%d vs %d).\n",
162 (int)sizeof(struct iwl_notif_statistics),
163 le32_to_cpu(pkt->len_n_flags) &
164 FH_RSCSR_FRAME_SIZE_MSK);
165
166 change = ((priv->_4965.statistics.general.common.temperature !=
167 pkt->u.stats.general.common.temperature) ||
168 ((priv->_4965.statistics.flag &
169 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
170 (pkt->u.stats.flag &
171 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
172#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
173 iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
174#endif
175
176 /* TODO: reading some of the statistics is unneeded */
177 memcpy(&priv->_4965.statistics, &pkt->u.stats,
178 sizeof(priv->_4965.statistics));
179
180 set_bit(STATUS_STATISTICS, &priv->status);
181
182 /* Reschedule the statistics timer to occur in
183 * REG_RECALIB_PERIOD seconds to ensure we get a
184 * thermal update even if the uCode doesn't give
185 * us one */
186 mod_timer(&priv->statistics_periodic, jiffies +
187 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
188
189 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
190 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
191 iwl4965_rx_calc_noise(priv);
192 queue_work(priv->workqueue, &priv->run_time_calib_work);
193 }
194 if (priv->cfg->ops->lib->temp_ops.temperature && change)
195 priv->cfg->ops->lib->temp_ops.temperature(priv);
196}
197
198void iwl4965_reply_statistics(struct iwl_priv *priv,
199 struct iwl_rx_mem_buffer *rxb)
200{
201 struct iwl_rx_packet *pkt = rxb_addr(rxb);
202
203 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
204#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
205 memset(&priv->_4965.accum_statistics, 0,
206 sizeof(struct iwl_notif_statistics));
207 memset(&priv->_4965.delta_statistics, 0,
208 sizeof(struct iwl_notif_statistics));
209 memset(&priv->_4965.max_delta, 0,
210 sizeof(struct iwl_notif_statistics));
211#endif
212 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
213 }
214 iwl4965_rx_statistics(priv, rxb);
215}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
new file mode 100644
index 00000000000..a262c23553d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
@@ -0,0 +1,721 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-4965.h"
36
37static struct iwl_link_quality_cmd *
38iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
39{
40 int i, r;
41 struct iwl_link_quality_cmd *link_cmd;
42 u32 rate_flags = 0;
43 __le32 rate_n_flags;
44
45 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
46 if (!link_cmd) {
47 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
48 return NULL;
49 }
50 /* Set up the rate scaling to start at selected rate, fall back
51 * all the way down to 1M in IEEE order, and then spin on 1M */
52 if (priv->band == IEEE80211_BAND_5GHZ)
53 r = IWL_RATE_6M_INDEX;
54 else
55 r = IWL_RATE_1M_INDEX;
56
57 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
58 rate_flags |= RATE_MCS_CCK_MSK;
59
60 rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
61 RATE_MCS_ANT_POS;
62 rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
63 rate_flags);
64 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
65 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
66
67 link_cmd->general_params.single_stream_ant_msk =
68 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
69
70 link_cmd->general_params.dual_stream_ant_msk =
71 priv->hw_params.valid_tx_ant &
72 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
73 if (!link_cmd->general_params.dual_stream_ant_msk) {
74 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
75 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
76 link_cmd->general_params.dual_stream_ant_msk =
77 priv->hw_params.valid_tx_ant;
78 }
79
80 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
81 link_cmd->agg_params.agg_time_limit =
82 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
83
84 link_cmd->sta_id = sta_id;
85
86 return link_cmd;
87}
88
89/*
90 * iwl4965_add_bssid_station - Add the special IBSS BSSID station
91 *
92 * Function sleeps.
93 */
94int
95iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
96 const u8 *addr, u8 *sta_id_r)
97{
98 int ret;
99 u8 sta_id;
100 struct iwl_link_quality_cmd *link_cmd;
101 unsigned long flags;
102
103 if (sta_id_r)
104 *sta_id_r = IWL_INVALID_STATION;
105
106 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
107 if (ret) {
108 IWL_ERR(priv, "Unable to add station %pM\n", addr);
109 return ret;
110 }
111
112 if (sta_id_r)
113 *sta_id_r = sta_id;
114
115 spin_lock_irqsave(&priv->sta_lock, flags);
116 priv->stations[sta_id].used |= IWL_STA_LOCAL;
117 spin_unlock_irqrestore(&priv->sta_lock, flags);
118
119 /* Set up default rate scaling table in device's station table */
120 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
121 if (!link_cmd) {
122 IWL_ERR(priv,
123 "Unable to initialize rate scaling for station %pM.\n",
124 addr);
125 return -ENOMEM;
126 }
127
128 ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
129 if (ret)
130 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
131
132 spin_lock_irqsave(&priv->sta_lock, flags);
133 priv->stations[sta_id].lq = link_cmd;
134 spin_unlock_irqrestore(&priv->sta_lock, flags);
135
136 return 0;
137}
138
139static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
140 struct iwl_rxon_context *ctx,
141 bool send_if_empty)
142{
143 int i, not_empty = 0;
144 u8 buff[sizeof(struct iwl_wep_cmd) +
145 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
146 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
147 size_t cmd_size = sizeof(struct iwl_wep_cmd);
148 struct iwl_host_cmd cmd = {
149 .id = ctx->wep_key_cmd,
150 .data = wep_cmd,
151 .flags = CMD_SYNC,
152 };
153
154 might_sleep();
155
156 memset(wep_cmd, 0, cmd_size +
157 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
158
159 for (i = 0; i < WEP_KEYS_MAX ; i++) {
160 wep_cmd->key[i].key_index = i;
161 if (ctx->wep_keys[i].key_size) {
162 wep_cmd->key[i].key_offset = i;
163 not_empty = 1;
164 } else {
165 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
166 }
167
168 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
169 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
170 ctx->wep_keys[i].key_size);
171 }
172
173 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
174 wep_cmd->num_keys = WEP_KEYS_MAX;
175
176 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
177
178 cmd.len = cmd_size;
179
180 if (not_empty || send_if_empty)
181 return iwl_legacy_send_cmd(priv, &cmd);
182 else
183 return 0;
184}
185
186int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
187 struct iwl_rxon_context *ctx)
188{
189 lockdep_assert_held(&priv->mutex);
190
191 return iwl4965_static_wepkey_cmd(priv, ctx, false);
192}
193
194int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx,
196 struct ieee80211_key_conf *keyconf)
197{
198 int ret;
199
200 lockdep_assert_held(&priv->mutex);
201
202 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
203 keyconf->keyidx);
204
205 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
206 if (iwl_legacy_is_rfkill(priv)) {
207 IWL_DEBUG_WEP(priv,
208 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
209 /* but keys in device are clear anyway so return success */
210 return 0;
211 }
212 ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
213 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
214 keyconf->keyidx, ret);
215
216 return ret;
217}
218
219int iwl4965_set_default_wep_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf)
222{
223 int ret;
224
225 lockdep_assert_held(&priv->mutex);
226
227 if (keyconf->keylen != WEP_KEY_LEN_128 &&
228 keyconf->keylen != WEP_KEY_LEN_64) {
229 IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
230 return -EINVAL;
231 }
232
233 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
234 keyconf->hw_key_idx = HW_KEY_DEFAULT;
235 priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
236
237 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
238 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
239 keyconf->keylen);
240
241 ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
242 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
243 keyconf->keylen, keyconf->keyidx, ret);
244
245 return ret;
246}
247
248static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
249 struct iwl_rxon_context *ctx,
250 struct ieee80211_key_conf *keyconf,
251 u8 sta_id)
252{
253 unsigned long flags;
254 __le16 key_flags = 0;
255 struct iwl_legacy_addsta_cmd sta_cmd;
256
257 lockdep_assert_held(&priv->mutex);
258
259 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
260
261 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
262 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
263 key_flags &= ~STA_KEY_FLG_INVALID;
264
265 if (keyconf->keylen == WEP_KEY_LEN_128)
266 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
267
268 if (sta_id == ctx->bcast_sta_id)
269 key_flags |= STA_KEY_MULTICAST_MSK;
270
271 spin_lock_irqsave(&priv->sta_lock, flags);
272
273 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
274 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
275 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
276
277 memcpy(priv->stations[sta_id].keyinfo.key,
278 keyconf->key, keyconf->keylen);
279
280 memcpy(&priv->stations[sta_id].sta.key.key[3],
281 keyconf->key, keyconf->keylen);
282
283 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
284 == STA_KEY_FLG_NO_ENC)
285 priv->stations[sta_id].sta.key.key_offset =
286 iwl_legacy_get_free_ucode_key_index(priv);
287 /* else, we are overriding an existing key => no need to allocate room
288 * in uCode. */
289
290 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
291 "no space for a new key");
292
293 priv->stations[sta_id].sta.key.key_flags = key_flags;
294 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
295 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
296
297 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
298 sizeof(struct iwl_legacy_addsta_cmd));
299 spin_unlock_irqrestore(&priv->sta_lock, flags);
300
301 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
302}
303
304static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
305 struct iwl_rxon_context *ctx,
306 struct ieee80211_key_conf *keyconf,
307 u8 sta_id)
308{
309 unsigned long flags;
310 __le16 key_flags = 0;
311 struct iwl_legacy_addsta_cmd sta_cmd;
312
313 lockdep_assert_held(&priv->mutex);
314
315 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
316 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
317 key_flags &= ~STA_KEY_FLG_INVALID;
318
319 if (sta_id == ctx->bcast_sta_id)
320 key_flags |= STA_KEY_MULTICAST_MSK;
321
322 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
323
324 spin_lock_irqsave(&priv->sta_lock, flags);
325 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
326 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
327
328 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
329 keyconf->keylen);
330
331 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
332 keyconf->keylen);
333
334 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
335 == STA_KEY_FLG_NO_ENC)
336 priv->stations[sta_id].sta.key.key_offset =
337 iwl_legacy_get_free_ucode_key_index(priv);
338 /* else, we are overriding an existing key => no need to allocate room
339 * in uCode. */
340
341 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
342 "no space for a new key");
343
344 priv->stations[sta_id].sta.key.key_flags = key_flags;
345 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
346 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
347
348 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
349 sizeof(struct iwl_legacy_addsta_cmd));
350 spin_unlock_irqrestore(&priv->sta_lock, flags);
351
352 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
353}
354
355static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
356 struct iwl_rxon_context *ctx,
357 struct ieee80211_key_conf *keyconf,
358 u8 sta_id)
359{
360 unsigned long flags;
361 int ret = 0;
362 __le16 key_flags = 0;
363
364 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
365 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
366 key_flags &= ~STA_KEY_FLG_INVALID;
367
368 if (sta_id == ctx->bcast_sta_id)
369 key_flags |= STA_KEY_MULTICAST_MSK;
370
371 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
372 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
373
374 spin_lock_irqsave(&priv->sta_lock, flags);
375
376 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
377 priv->stations[sta_id].keyinfo.keylen = 16;
378
379 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
380 == STA_KEY_FLG_NO_ENC)
381 priv->stations[sta_id].sta.key.key_offset =
382 iwl_legacy_get_free_ucode_key_index(priv);
383 /* else, we are overriding an existing key => no need to allocate room
384 * in uCode. */
385
386 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
387 "no space for a new key");
388
389 priv->stations[sta_id].sta.key.key_flags = key_flags;
390
391
392 /* This copy is actually not needed: we get the key with each TX */
393 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
394
395 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
396
397 spin_unlock_irqrestore(&priv->sta_lock, flags);
398
399 return ret;
400}
401
402void iwl4965_update_tkip_key(struct iwl_priv *priv,
403 struct iwl_rxon_context *ctx,
404 struct ieee80211_key_conf *keyconf,
405 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
406{
407 u8 sta_id;
408 unsigned long flags;
409 int i;
410
411 if (iwl_legacy_scan_cancel(priv)) {
412 /* cancel scan failed, just live w/ bad key and rely
413 briefly on SW decryption */
414 return;
415 }
416
417 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
418 if (sta_id == IWL_INVALID_STATION)
419 return;
420
421 spin_lock_irqsave(&priv->sta_lock, flags);
422
423 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
424
425 for (i = 0; i < 5; i++)
426 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
427 cpu_to_le16(phase1key[i]);
428
429 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
430 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
431
432 iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
433
434 spin_unlock_irqrestore(&priv->sta_lock, flags);
435
436}
437
438int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
439 struct iwl_rxon_context *ctx,
440 struct ieee80211_key_conf *keyconf,
441 u8 sta_id)
442{
443 unsigned long flags;
444 u16 key_flags;
445 u8 keyidx;
446 struct iwl_legacy_addsta_cmd sta_cmd;
447
448 lockdep_assert_held(&priv->mutex);
449
450 ctx->key_mapping_keys--;
451
452 spin_lock_irqsave(&priv->sta_lock, flags);
453 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
454 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
455
456 IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
457 keyconf->keyidx, sta_id);
458
459 if (keyconf->keyidx != keyidx) {
460 /* We need to remove a key with an index different from the one
461 * in the uCode. This means that the key we need to remove has
462 * been replaced by another one with a different index.
463 * Don't do anything and return success.
464 */
465 spin_unlock_irqrestore(&priv->sta_lock, flags);
466 return 0;
467 }
468
469 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
470 IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
471 keyconf->keyidx, key_flags);
472 spin_unlock_irqrestore(&priv->sta_lock, flags);
473 return 0;
474 }
475
476 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
477 &priv->ucode_key_table))
478 IWL_ERR(priv, "index %d not used in uCode key table.\n",
479 priv->stations[sta_id].sta.key.key_offset);
480 memset(&priv->stations[sta_id].keyinfo, 0,
481 sizeof(struct iwl_hw_key));
482 memset(&priv->stations[sta_id].sta.key, 0,
483 sizeof(struct iwl4965_keyinfo));
484 priv->stations[sta_id].sta.key.key_flags =
485 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
486 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
487 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
488 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
489
490 if (iwl_legacy_is_rfkill(priv)) {
491 IWL_DEBUG_WEP(priv,
492 "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
493 spin_unlock_irqrestore(&priv->sta_lock, flags);
494 return 0;
495 }
496 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
497 sizeof(struct iwl_legacy_addsta_cmd));
498 spin_unlock_irqrestore(&priv->sta_lock, flags);
499
500 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
501}
502
503int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
504 struct ieee80211_key_conf *keyconf, u8 sta_id)
505{
506 int ret;
507
508 lockdep_assert_held(&priv->mutex);
509
510 ctx->key_mapping_keys++;
511 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
512
513 switch (keyconf->cipher) {
514 case WLAN_CIPHER_SUITE_CCMP:
515 ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
516 keyconf, sta_id);
517 break;
518 case WLAN_CIPHER_SUITE_TKIP:
519 ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
520 keyconf, sta_id);
521 break;
522 case WLAN_CIPHER_SUITE_WEP40:
523 case WLAN_CIPHER_SUITE_WEP104:
524 ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
525 keyconf, sta_id);
526 break;
527 default:
528 IWL_ERR(priv,
529 "Unknown alg: %s cipher = %x\n", __func__,
530 keyconf->cipher);
531 ret = -EINVAL;
532 }
533
534 IWL_DEBUG_WEP(priv,
535 "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
536 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
537 sta_id, ret);
538
539 return ret;
540}
541
542/**
543 * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
544 *
545 * This adds the broadcast station into the driver's station table
546 * and marks it driver active, so that it will be restored to the
547 * device at the next best time.
548 */
549int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
550 struct iwl_rxon_context *ctx)
551{
552 struct iwl_link_quality_cmd *link_cmd;
553 unsigned long flags;
554 u8 sta_id;
555
556 spin_lock_irqsave(&priv->sta_lock, flags);
557 sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
558 false, NULL);
559 if (sta_id == IWL_INVALID_STATION) {
560 IWL_ERR(priv, "Unable to prepare broadcast station\n");
561 spin_unlock_irqrestore(&priv->sta_lock, flags);
562
563 return -EINVAL;
564 }
565
566 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
567 priv->stations[sta_id].used |= IWL_STA_BCAST;
568 spin_unlock_irqrestore(&priv->sta_lock, flags);
569
570 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
571 if (!link_cmd) {
572 IWL_ERR(priv,
573 "Unable to initialize rate scaling for bcast station.\n");
574 return -ENOMEM;
575 }
576
577 spin_lock_irqsave(&priv->sta_lock, flags);
578 priv->stations[sta_id].lq = link_cmd;
579 spin_unlock_irqrestore(&priv->sta_lock, flags);
580
581 return 0;
582}
583
584/**
585 * iwl4965_update_bcast_station - update broadcast station's LQ command
586 *
587 * Only used by iwl4965. Placed here to have all bcast station management
588 * code together.
589 */
590static int iwl4965_update_bcast_station(struct iwl_priv *priv,
591 struct iwl_rxon_context *ctx)
592{
593 unsigned long flags;
594 struct iwl_link_quality_cmd *link_cmd;
595 u8 sta_id = ctx->bcast_sta_id;
596
597 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
598 if (!link_cmd) {
599 IWL_ERR(priv,
600 "Unable to initialize rate scaling for bcast station.\n");
601 return -ENOMEM;
602 }
603
604 spin_lock_irqsave(&priv->sta_lock, flags);
605 if (priv->stations[sta_id].lq)
606 kfree(priv->stations[sta_id].lq);
607 else
608 IWL_DEBUG_INFO(priv,
609 "Bcast station rate scaling has not been initialized yet.\n");
610 priv->stations[sta_id].lq = link_cmd;
611 spin_unlock_irqrestore(&priv->sta_lock, flags);
612
613 return 0;
614}
615
616int iwl4965_update_bcast_stations(struct iwl_priv *priv)
617{
618 struct iwl_rxon_context *ctx;
619 int ret = 0;
620
621 for_each_context(priv, ctx) {
622 ret = iwl4965_update_bcast_station(priv, ctx);
623 if (ret)
624 break;
625 }
626
627 return ret;
628}
629
630/**
631 * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
632 */
633int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
634{
635 unsigned long flags;
636 struct iwl_legacy_addsta_cmd sta_cmd;
637
638 lockdep_assert_held(&priv->mutex);
639
640 /* Remove "disable" flag, to enable Tx for this TID */
641 spin_lock_irqsave(&priv->sta_lock, flags);
642 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
643 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
644 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
645 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
646 sizeof(struct iwl_legacy_addsta_cmd));
647 spin_unlock_irqrestore(&priv->sta_lock, flags);
648
649 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
650}
651
652int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
653 int tid, u16 ssn)
654{
655 unsigned long flags;
656 int sta_id;
657 struct iwl_legacy_addsta_cmd sta_cmd;
658
659 lockdep_assert_held(&priv->mutex);
660
661 sta_id = iwl_legacy_sta_id(sta);
662 if (sta_id == IWL_INVALID_STATION)
663 return -ENXIO;
664
665 spin_lock_irqsave(&priv->sta_lock, flags);
666 priv->stations[sta_id].sta.station_flags_msk = 0;
667 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
668 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
669 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
670 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
671 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
672 sizeof(struct iwl_legacy_addsta_cmd));
673 spin_unlock_irqrestore(&priv->sta_lock, flags);
674
675 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
676}
677
678int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
679 int tid)
680{
681 unsigned long flags;
682 int sta_id;
683 struct iwl_legacy_addsta_cmd sta_cmd;
684
685 lockdep_assert_held(&priv->mutex);
686
687 sta_id = iwl_legacy_sta_id(sta);
688 if (sta_id == IWL_INVALID_STATION) {
689 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
690 return -ENXIO;
691 }
692
693 spin_lock_irqsave(&priv->sta_lock, flags);
694 priv->stations[sta_id].sta.station_flags_msk = 0;
695 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
696 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
697 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
698 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
699 sizeof(struct iwl_legacy_addsta_cmd));
700 spin_unlock_irqrestore(&priv->sta_lock, flags);
701
702 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
703}
704
705void
706iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
707{
708 unsigned long flags;
709
710 spin_lock_irqsave(&priv->sta_lock, flags);
711 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
712 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
713 priv->stations[sta_id].sta.sta.modify_mask =
714 STA_MODIFY_SLEEP_TX_COUNT_MSK;
715 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
716 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
717 iwl_legacy_send_add_sta(priv,
718 &priv->stations[sta_id].sta, CMD_ASYNC);
719 spin_unlock_irqrestore(&priv->sta_lock, flags);
720
721}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
new file mode 100644
index 00000000000..ac4f64de136
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -0,0 +1,1378 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-4965-hw.h"
41#include "iwl-4965.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
48 * Mac80211 uses the following numbers, which we get from it
49 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
69static const u8 tid_to_ac[] = {
70 IEEE80211_AC_BE,
71 IEEE80211_AC_BK,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BE,
74 IEEE80211_AC_VI,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VO,
77 IEEE80211_AC_VO
78};
79
80static inline int iwl4965_get_ac_from_tid(u16 tid)
81{
82 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
83 return tid_to_ac[tid];
84
85 /* no support for TIDs 8-15 yet */
86 return -EINVAL;
87}
88
89static inline int
90iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
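Worked example for the mappings above: TID 5 is a video TID, so it resolves to the VI FIFO through ctx->ac_to_fifo[] (the tiny wrapper below is illustrative only):

/* e.g. a QoS frame with TID 5: tid_to_ac[5] == IEEE80211_AC_VI */
static inline int example_fifo_for_tid5(struct iwl_rxon_context *ctx)
{
	/* returns the VI FIFO; TIDs 8-15 would yield -EINVAL instead */
	return iwl4965_get_fifo_from_tid(ctx, 5);
}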
98
99/*
100 * Handle building the REPLY_TX command.
101 */
102static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
103 struct sk_buff *skb,
104 struct iwl_tx_cmd *tx_cmd,
105 struct ieee80211_tx_info *info,
106 struct ieee80211_hdr *hdr,
107 u8 std_id)
108{
109 __le16 fc = hdr->frame_control;
110 __le32 tx_flags = tx_cmd->tx_flags;
111
112 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
113 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
114 tx_flags |= TX_CMD_FLG_ACK_MSK;
115 if (ieee80211_is_mgmt(fc))
116 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
117 if (ieee80211_is_probe_resp(fc) &&
118 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
119 tx_flags |= TX_CMD_FLG_TSF_MSK;
120 } else {
121 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
122 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
123 }
124
125 if (ieee80211_is_back_req(fc))
126 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
127
128 tx_cmd->sta_id = std_id;
129 if (ieee80211_has_morefrags(fc))
130 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
131
132 if (ieee80211_is_data_qos(fc)) {
133 u8 *qc = ieee80211_get_qos_ctl(hdr);
134 tx_cmd->tid_tspec = qc[0] & 0xf;
135 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
136 } else {
137 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
138 }
139
140 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
141
142 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
143 if (ieee80211_is_mgmt(fc)) {
144 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
145 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
146 else
147 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
148 } else {
149 tx_cmd->timeout.pm_frame_timeout = 0;
150 }
151
152 tx_cmd->driver_txop = 0;
153 tx_cmd->tx_flags = tx_flags;
154 tx_cmd->next_frame_len = 0;
155}
156
157#define RTS_DFAULT_RETRY_LIMIT 60
158
159static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
160 struct iwl_tx_cmd *tx_cmd,
161 struct ieee80211_tx_info *info,
162 __le16 fc)
163{
164 u32 rate_flags;
165 int rate_idx;
166 u8 rts_retry_limit;
167 u8 data_retry_limit;
168 u8 rate_plcp;
169
170 /* Set retry limit on DATA packets and Probe Responses*/
171 if (ieee80211_is_probe_resp(fc))
172 data_retry_limit = 3;
173 else
174 data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
175 tx_cmd->data_retry_limit = data_retry_limit;
176
177 /* Set retry limit on RTS packets */
178 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
179 if (data_retry_limit < rts_retry_limit)
180 rts_retry_limit = data_retry_limit;
181 tx_cmd->rts_retry_limit = rts_retry_limit;
182
183 /* DATA packets will use the uCode station table for rate/antenna
184 * selection */
185 if (ieee80211_is_data(fc)) {
186 tx_cmd->initial_rate_index = 0;
187 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
188 return;
189 }
190
191 /*
192 * If the current TX rate stored in mac80211 has the MCS bit set, it's
193 * not really a TX rate. Thus, we use the lowest supported rate for
194 * this band. Also use the lowest supported rate if the stored rate
195 * index is invalid.
196 */
197 rate_idx = info->control.rates[0].idx;
198 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
199 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
200 rate_idx = rate_lowest_index(&priv->bands[info->band],
201 info->control.sta);
202 /* For 5 GHz band, remap mac80211 rate indices into driver indices */
203 if (info->band == IEEE80211_BAND_5GHZ)
204 rate_idx += IWL_FIRST_OFDM_RATE;
205 /* Get PLCP rate for tx_cmd->rate_n_flags */
206 rate_plcp = iwlegacy_rates[rate_idx].plcp;
207 /* Zero out flags for this packet */
208 rate_flags = 0;
209
210 /* Set CCK flag as needed */
211 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
212 rate_flags |= RATE_MCS_CCK_MSK;
213
214 /* Set up antennas */
215 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
216 priv->hw_params.valid_tx_ant);
217
218 rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
219
220 /* Set the rate in the TX cmd */
221 tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
222}
223
224static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
225 struct ieee80211_tx_info *info,
226 struct iwl_tx_cmd *tx_cmd,
227 struct sk_buff *skb_frag,
228 int sta_id)
229{
230 struct ieee80211_key_conf *keyconf = info->control.hw_key;
231
232 switch (keyconf->cipher) {
233 case WLAN_CIPHER_SUITE_CCMP:
234 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
235 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
236 if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
238 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
239 break;
240
241 case WLAN_CIPHER_SUITE_TKIP:
242 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
243 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
244 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
245 break;
246
247 case WLAN_CIPHER_SUITE_WEP104:
248 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
249 /* fall through */
250 case WLAN_CIPHER_SUITE_WEP40:
251 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
252 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
253
254 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
255
256 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
257 "with key %d\n", keyconf->keyidx);
258 break;
259
260 default:
261 IWL_ERR(priv, "Unknown encryption cipher %x\n", keyconf->cipher);
262 break;
263 }
264}
265
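/*
 * Editor's illustrative sketch (not part of the original patch): the
 * key-index packing used by the WEP branch of
 * iwl4965_tx_cmd_build_hwcrypto() above.  The SKETCH_* values are
 * arbitrary stand-ins for TX_CMD_SEC_WEP/KEY128/MSK/SHIFT, chosen only
 * to show the packing pattern, not the real register layout.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_SEC_WEP		0x01	/* stand-in for TX_CMD_SEC_WEP */
#define SKETCH_SEC_KEY128	0x08	/* stand-in for TX_CMD_SEC_KEY128 */
#define SKETCH_SEC_MSK		0x03	/* stand-in for TX_CMD_SEC_MSK */
#define SKETCH_SEC_SHIFT	6	/* stand-in for TX_CMD_SEC_SHIFT */

static uint8_t sketch_wep_sec_ctl(uint8_t keyidx, bool is_wep104)
{
	uint8_t sec_ctl = SKETCH_SEC_WEP |
			  ((keyidx & SKETCH_SEC_MSK) << SKETCH_SEC_SHIFT);

	if (is_wep104)
		sec_ctl |= SKETCH_SEC_KEY128;	/* 104-bit (WEP-104) key */
	return sec_ctl;
}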
266/*
267 * start REPLY_TX command process
268 */
269int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
270{
271 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
272 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
273 struct ieee80211_sta *sta = info->control.sta;
274 struct iwl_station_priv *sta_priv = NULL;
275 struct iwl_tx_queue *txq;
276 struct iwl_queue *q;
277 struct iwl_device_cmd *out_cmd;
278 struct iwl_cmd_meta *out_meta;
279 struct iwl_tx_cmd *tx_cmd;
280 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
281 int txq_id;
282 dma_addr_t phys_addr;
283 dma_addr_t txcmd_phys;
284 dma_addr_t scratch_phys;
285 u16 len, firstlen, secondlen;
286 u16 seq_number = 0;
287 __le16 fc;
288 u8 hdr_len;
289 u8 sta_id;
290 u8 wait_write_ptr = 0;
291 u8 tid = 0;
292 u8 *qc = NULL;
293 unsigned long flags;
294 bool is_agg = false;
295
296 if (info->control.vif)
297 ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
298
299 spin_lock_irqsave(&priv->lock, flags);
300 if (iwl_legacy_is_rfkill(priv)) {
301 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
302 goto drop_unlock;
303 }
304
305 fc = hdr->frame_control;
306
307#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
308 if (ieee80211_is_auth(fc))
309 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
310 else if (ieee80211_is_assoc_req(fc))
311 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
312 else if (ieee80211_is_reassoc_req(fc))
313 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
314#endif
315
316 hdr_len = ieee80211_hdrlen(fc);
317
318 /* For management frames, use the broadcast id so as not to break aggregation */
319 if (!ieee80211_is_data(fc))
320 sta_id = ctx->bcast_sta_id;
321 else {
322 /* Find index into station table for destination station */
323 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
324
325 if (sta_id == IWL_INVALID_STATION) {
326 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
327 hdr->addr1);
328 goto drop_unlock;
329 }
330 }
331
332 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
333
334 if (sta)
335 sta_priv = (void *)sta->drv_priv;
336
337 if (sta_priv && sta_priv->asleep &&
338 (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
339 /*
340 * This sends an asynchronous command to the device,
341 * but we can rely on it being processed before the
342 * next frame is processed -- and the next frame to
343 * this station is the one that will consume this
344 * counter.
345 * For now set the counter to just 1 since we do not
346 * support uAPSD yet.
347 */
348 iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
349 }
350
351 /*
352 * Send this frame after DTIM -- there's a special queue
353 * reserved for this for contexts that support AP mode.
354 */
355 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
356 txq_id = ctx->mcast_queue;
357 /*
358 * The microcode will clear the more data
359 * bit in the last frame it transmits.
360 */
361 hdr->frame_control |=
362 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
363 } else
364 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
365
366 /* irqs already disabled/saved above when locking priv->lock */
367 spin_lock(&priv->sta_lock);
368
369 if (ieee80211_is_data_qos(fc)) {
370 qc = ieee80211_get_qos_ctl(hdr);
371 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
372 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
373 spin_unlock(&priv->sta_lock);
374 goto drop_unlock;
375 }
376 seq_number = priv->stations[sta_id].tid[tid].seq_number;
377 seq_number &= IEEE80211_SCTL_SEQ;
378 hdr->seq_ctrl = hdr->seq_ctrl &
379 cpu_to_le16(IEEE80211_SCTL_FRAG);
380 hdr->seq_ctrl |= cpu_to_le16(seq_number);
381 seq_number += 0x10;
382 /* aggregation is on for this <sta,tid> */
383 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
384 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
385 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
386 is_agg = true;
387 }
388 }
389
390 txq = &priv->txq[txq_id];
391 q = &txq->q;
392
393 if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
394 spin_unlock(&priv->sta_lock);
395 goto drop_unlock;
396 }
397
398 if (ieee80211_is_data_qos(fc)) {
399 priv->stations[sta_id].tid[tid].tfds_in_queue++;
400 if (!ieee80211_has_morefrags(fc))
401 priv->stations[sta_id].tid[tid].seq_number = seq_number;
402 }
403
404 spin_unlock(&priv->sta_lock);
405
406 /* Set up driver data for this TFD */
407 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
408 txq->txb[q->write_ptr].skb = skb;
409 txq->txb[q->write_ptr].ctx = ctx;
410
411 /* Set up first empty entry in queue's array of Tx/cmd buffers */
412 out_cmd = txq->cmd[q->write_ptr];
413 out_meta = &txq->meta[q->write_ptr];
414 tx_cmd = &out_cmd->cmd.tx;
415 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
416 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
417
418 /*
419 * Set up the Tx-command (not MAC!) header.
420 * Store the chosen Tx queue and TFD index within the sequence field;
421 * after Tx, uCode's Tx response will return this value so driver can
422 * locate the frame within the tx queue and do post-tx processing.
423 */
424 out_cmd->hdr.cmd = REPLY_TX;
425 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
426 INDEX_TO_SEQ(q->write_ptr)));
427
428 /* Copy MAC header from skb into command buffer */
429 memcpy(tx_cmd->hdr, hdr, hdr_len);
430
431
432 /* Total # bytes to be transmitted */
433 len = (u16)skb->len;
434 tx_cmd->len = cpu_to_le16(len);
435
436 if (info->control.hw_key)
437 iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
438
439 /* TODO need this for burst mode later on */
440 iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
441 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
442
443 iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
444
445 iwl_legacy_update_stats(priv, true, fc, len);
446 /*
447 * Use the first empty entry in this queue's command buffer array
448 * to contain the Tx command and MAC header concatenated together
449 * (payload data will be in another buffer).
450 * Size of this varies, due to varying MAC header length.
451 * If end is not dword aligned, we'll have 2 extra bytes at the end
452 * of the MAC header (device reads on dword boundaries).
453 * We'll tell device about this padding later.
454 */
455 len = sizeof(struct iwl_tx_cmd) +
456 sizeof(struct iwl_cmd_header) + hdr_len;
457 firstlen = (len + 3) & ~3;
458
459 /* Tell NIC about any 2-byte padding after MAC header */
460 if (firstlen != len)
461 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
462
463 /* Physical address of this Tx command's header (not MAC header!),
464 * within command buffer array. */
465 txcmd_phys = pci_map_single(priv->pci_dev,
466 &out_cmd->hdr, firstlen,
467 PCI_DMA_BIDIRECTIONAL);
468 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
469 dma_unmap_len_set(out_meta, len, firstlen);
470 /* Add buffer containing Tx command and MAC(!) header to TFD's
471 * first entry */
472 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
473 txcmd_phys, firstlen, 1, 0);
474
475 if (!ieee80211_has_morefrags(hdr->frame_control)) {
476 txq->need_update = 1;
477 } else {
478 wait_write_ptr = 1;
479 txq->need_update = 0;
480 }
481
482 /* Set up TFD's 2nd entry to point directly to remainder of skb,
483 * if any (802.11 null frames have no payload). */
484 secondlen = skb->len - hdr_len;
485 if (secondlen > 0) {
486 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
487 secondlen, PCI_DMA_TODEVICE);
488 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
489 phys_addr, secondlen,
490 0, 0);
491 }
492
493 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
494 offsetof(struct iwl_tx_cmd, scratch);
495
496 /* take back ownership of DMA buffer to enable update */
497 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
498 firstlen, PCI_DMA_BIDIRECTIONAL);
499 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
500 tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
501
502 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
503 le16_to_cpu(out_cmd->hdr.sequence));
504 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
505 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
506 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
507
508 /* Set up entry for this TFD in Tx byte-count array */
509 if (info->flags & IEEE80211_TX_CTL_AMPDU)
510 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
511 le16_to_cpu(tx_cmd->len));
512
513 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
514 firstlen, PCI_DMA_BIDIRECTIONAL);
515
516 trace_iwlwifi_legacy_dev_tx(priv,
517 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
518 sizeof(struct iwl_tfd),
519 &out_cmd->hdr, firstlen,
520 skb->data + hdr_len, secondlen);
521
522 /* Tell device the write index *just past* this latest filled TFD */
523 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
524 iwl_legacy_txq_update_write_ptr(priv, txq);
525 spin_unlock_irqrestore(&priv->lock, flags);
526
527 /*
528 * At this point the frame is "transmitted" successfully
529 * and we will get a TX status notification eventually,
530 * regardless of what happens below; the remaining code only
531 * updates counters and decides whether to stop or refresh the queue.
532 */
533
534 /*
535 * Avoid atomic ops if it isn't an associated client.
536 * Also, if this is a packet for aggregation, don't
537 * increase the counter because the ucode will stop
538 * aggregation queues when their respective station
539 * goes to sleep.
540 */
541 if (sta_priv && sta_priv->client && !is_agg)
542 atomic_inc(&sta_priv->pending_frames);
543
544 if ((iwl_legacy_queue_space(q) < q->high_mark) &&
545 priv->mac80211_registered) {
546 if (wait_write_ptr) {
547 spin_lock_irqsave(&priv->lock, flags);
548 txq->need_update = 1;
549 iwl_legacy_txq_update_write_ptr(priv, txq);
550 spin_unlock_irqrestore(&priv->lock, flags);
551 } else {
552 iwl_legacy_stop_queue(priv, txq);
553 }
554 }
555
556 return 0;
557
558drop_unlock:
559 spin_unlock_irqrestore(&priv->lock, flags);
560 return -1;
561}
562
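/*
 * Editor's illustrative sketch (not part of the original patch): the
 * "firstlen" dword rounding done in iwl4965_tx_skb() above.  The Tx
 * command + MAC header portion is padded up to a 4-byte boundary; when
 * the padded length differs from the real one, the real code sets
 * TX_CMD_FLG_MH_PAD_MSK so the device knows to skip the pad bytes
 * (0 or 2 in practice, since 802.11 headers have even length).
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct sketch_firstlen {
	uint16_t padded_len;	/* length carried by the first TFD entry */
	bool needs_pad_flag;	/* true if pad bytes were appended */
};

static struct sketch_firstlen sketch_round_firstlen(uint16_t len)
{
	struct sketch_firstlen out;

	out.padded_len = (len + 3) & ~3;	/* round up to 4 bytes */
	out.needs_pad_flag = (out.padded_len != len);
	return out;
}

static void sketch_firstlen_example(void)
{
	/* arbitrary example lengths, not real structure sizes */
	assert(sketch_round_firstlen(46).padded_len == 48);
	assert(sketch_round_firstlen(46).needs_pad_flag);
	assert(!sketch_round_firstlen(48).needs_pad_flag);
}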
563static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
564 struct iwl_dma_ptr *ptr, size_t size)
565{
566 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
567 GFP_KERNEL);
568 if (!ptr->addr)
569 return -ENOMEM;
570 ptr->size = size;
571 return 0;
572}
573
574static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
575 struct iwl_dma_ptr *ptr)
576{
577 if (unlikely(!ptr->addr))
578 return;
579
580 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
581 memset(ptr, 0, sizeof(*ptr));
582}
583
584/**
585 * iwl4965_hw_txq_ctx_free - Free TXQ Context
586 *
587 * Destroy all TX DMA queues and structures
588 */
589void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
590{
591 int txq_id;
592
593 /* Tx queues */
594 if (priv->txq) {
595 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
596 if (txq_id == priv->cmd_queue)
597 iwl_legacy_cmd_queue_free(priv);
598 else
599 iwl_legacy_tx_queue_free(priv, txq_id);
600 }
601 iwl4965_free_dma_ptr(priv, &priv->kw);
602
603 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
604
605 /* free tx queue structure */
606 iwl_legacy_txq_mem(priv);
607}
608
609/**
610 * iwl4965_txq_ctx_alloc - allocate TX queue context
611 * @priv: driver private data
612 *
613 * Allocate all Tx DMA structures and initialize them.
614 * Return: 0 on success, negative error code on failure.
615 */
616int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
617{
618 int ret;
619 int txq_id, slots_num;
620 unsigned long flags;
621
622 /* Free all tx/cmd queues and keep-warm buffer */
623 iwl4965_hw_txq_ctx_free(priv);
624
625 ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
626 priv->hw_params.scd_bc_tbls_size);
627 if (ret) {
628 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
629 goto error_bc_tbls;
630 }
631 /* Alloc keep-warm buffer */
632 ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
633 if (ret) {
634 IWL_ERR(priv, "Keep Warm allocation failed\n");
635 goto error_kw;
636 }
637
638 /* allocate tx queue structure */
639 ret = iwl_legacy_alloc_txq_mem(priv);
640 if (ret)
641 goto error;
642
643 spin_lock_irqsave(&priv->lock, flags);
644
645 /* Turn off all Tx DMA fifos */
646 iwl4965_txq_set_sched(priv, 0);
647
648 /* Tell NIC where to find the "keep warm" buffer */
649 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
650
651 spin_unlock_irqrestore(&priv->lock, flags);
652
653 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
654 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
655 slots_num = (txq_id == priv->cmd_queue) ?
656 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
657 ret = iwl_legacy_tx_queue_init(priv,
658 &priv->txq[txq_id], slots_num,
659 txq_id);
660 if (ret) {
661 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
662 goto error;
663 }
664 }
665
666 return ret;
667
668 error:
669 iwl4965_hw_txq_ctx_free(priv);
670 iwl4965_free_dma_ptr(priv, &priv->kw);
671 error_kw:
672 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
673 error_bc_tbls:
674 return ret;
675}
676
677void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
678{
679 int txq_id, slots_num;
680 unsigned long flags;
681
682 spin_lock_irqsave(&priv->lock, flags);
683
684 /* Turn off all Tx DMA fifos */
685 iwl4965_txq_set_sched(priv, 0);
686
687 /* Tell NIC where to find the "keep warm" buffer */
688 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
689
690 spin_unlock_irqrestore(&priv->lock, flags);
691
692 /* Alloc and init all Tx queues, including the command queue (#4) */
693 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
694 slots_num = txq_id == priv->cmd_queue ?
695 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
696 iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
697 slots_num, txq_id);
698 }
699}
700
701/**
702 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
703 */
704void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
705{
706 int ch, txq_id;
707 unsigned long flags;
708
709 /* Turn off all Tx DMA fifos */
710 spin_lock_irqsave(&priv->lock, flags);
711
712 iwl4965_txq_set_sched(priv, 0);
713
714 /* Stop each Tx DMA channel, and wait for it to be idle */
715 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
716 iwl_legacy_write_direct32(priv,
717 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
718 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
719 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
720 1000))
721 IWL_ERR(priv, "Failing on timeout while stopping"
722 " DMA channel %d [0x%08x]", ch,
723 iwl_legacy_read_direct32(priv,
724 FH_TSSR_TX_STATUS_REG));
725 }
726 spin_unlock_irqrestore(&priv->lock, flags);
727
728 if (!priv->txq)
729 return;
730
731 /* Unmap DMA from host system and free skb's */
732 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
733 if (txq_id == priv->cmd_queue)
734 iwl_legacy_cmd_queue_unmap(priv);
735 else
736 iwl_legacy_tx_queue_unmap(priv, txq_id);
737}
738
739/*
740 * Find first available (lowest unused) Tx Queue, mark it "active".
741 * Called only when finding queue for aggregation.
742 * Should never return anything < 7, because queues 0-6 should already
743 * be in use as EDCA AC queues (0-3), command queue (4), and reserved (5, 6).
744 */
745static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
746{
747 int txq_id;
748
749 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
750 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
751 return txq_id;
752 return -1;
753}
754
755/**
756 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
757 */
758static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
759 u16 txq_id)
760{
761 /* Simply stop the queue, but don't change any configuration;
762 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
763 iwl_legacy_write_prph(priv,
764 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
765 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
766 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
767}
768
769/**
770 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
771 */
772static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
773 u16 txq_id)
774{
775 u32 tbl_dw_addr;
776 u32 tbl_dw;
777 u16 scd_q2ratid;
778
779 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
780
781 tbl_dw_addr = priv->scd_base_addr +
782 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
783
784 tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
785
786 if (txq_id & 0x1)
787 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
788 else
789 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
790
791 iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
792
793 return 0;
794}
795
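/*
 * Editor's illustrative sketch (not part of the original patch): the
 * half-dword packing performed by iwl4965_tx_queue_set_q2ratid()
 * above.  Each 32-bit entry of the scheduler's translate table holds
 * the RA/TID value for two queues: odd queue numbers occupy the upper
 * 16 bits, even queue numbers the lower 16 bits.
 */
#include <stdint.h>

static uint32_t sketch_pack_q2ratid(uint32_t tbl_dw, uint16_t txq_id,
				    uint16_t ra_tid)
{
	if (txq_id & 0x1)
		return ((uint32_t)ra_tid << 16) | (tbl_dw & 0x0000FFFFu);

	return ra_tid | (tbl_dw & 0xFFFF0000u);
}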
796/**
797 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
798 *
799 * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
800 * i.e. it must be one of the higher queues used for aggregation
801 */
802static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
803 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
804{
805 unsigned long flags;
806 u16 ra_tid;
807 int ret;
808
809 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
810 (IWL49_FIRST_AMPDU_QUEUE +
811 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
812 IWL_WARN(priv,
813 "queue number out of range: %d, must be %d to %d\n",
814 txq_id, IWL49_FIRST_AMPDU_QUEUE,
815 IWL49_FIRST_AMPDU_QUEUE +
816 priv->cfg->base_params->num_of_ampdu_queues - 1);
817 return -EINVAL;
818 }
819
820 ra_tid = BUILD_RAxTID(sta_id, tid);
821
822 /* Modify device's station table to Tx this TID */
823 ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
824 if (ret)
825 return ret;
826
827 spin_lock_irqsave(&priv->lock, flags);
828
829 /* Stop this Tx queue before configuring it */
830 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
831
832 /* Map receiver-address / traffic-ID to this queue */
833 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
834
835 /* Set this queue as a chain-building queue */
836 iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
837
838 /* Place first TFD at index corresponding to start sequence number.
839 * Assumes that ssn_idx is valid (!= 0xFFF) */
840 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
841 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
842 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
843
844 /* Set up Tx window size and frame limit for this queue */
845 iwl_legacy_write_targ_mem(priv,
846 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
847 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
848 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
849
850 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
851 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
852 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
853 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
854
855 iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
856
857 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
858 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
859
860 spin_unlock_irqrestore(&priv->lock, flags);
861
862 return 0;
863}
864
865
866int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
867 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
868{
869 int sta_id;
870 int tx_fifo;
871 int txq_id;
872 int ret;
873 unsigned long flags;
874 struct iwl_tid_data *tid_data;
875
876 tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
877 if (unlikely(tx_fifo < 0))
878 return tx_fifo;
879
880 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
881 __func__, sta->addr, tid);
882
883 sta_id = iwl_legacy_sta_id(sta);
884 if (sta_id == IWL_INVALID_STATION) {
885 IWL_ERR(priv, "Start AGG on invalid station\n");
886 return -ENXIO;
887 }
888 if (unlikely(tid >= MAX_TID_COUNT))
889 return -EINVAL;
890
891 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
892 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
893 return -ENXIO;
894 }
895
896 txq_id = iwl4965_txq_ctx_activate_free(priv);
897 if (txq_id == -1) {
898 IWL_ERR(priv, "No free aggregation queue available\n");
899 return -ENXIO;
900 }
901
902 spin_lock_irqsave(&priv->sta_lock, flags);
903 tid_data = &priv->stations[sta_id].tid[tid];
904 *ssn = SEQ_TO_SN(tid_data->seq_number);
905 tid_data->agg.txq_id = txq_id;
906 iwl_legacy_set_swq_id(&priv->txq[txq_id],
907 iwl4965_get_ac_from_tid(tid), txq_id);
908 spin_unlock_irqrestore(&priv->sta_lock, flags);
909
910 ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
911 sta_id, tid, *ssn);
912 if (ret)
913 return ret;
914
915 spin_lock_irqsave(&priv->sta_lock, flags);
916 tid_data = &priv->stations[sta_id].tid[tid];
917 if (tid_data->tfds_in_queue == 0) {
918 IWL_DEBUG_HT(priv, "HW queue is empty\n");
919 tid_data->agg.state = IWL_AGG_ON;
920 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
921 } else {
922 IWL_DEBUG_HT(priv,
923 "HW queue is NOT empty: %d packets in HW queue\n",
924 tid_data->tfds_in_queue);
925 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
926 }
927 spin_unlock_irqrestore(&priv->sta_lock, flags);
928 return ret;
929}
930
931/**
932 * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
933 * priv->lock must be held by the caller
934 */
935static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
936 u16 ssn_idx, u8 tx_fifo)
937{
938 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
939 (IWL49_FIRST_AMPDU_QUEUE +
940 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
941 IWL_WARN(priv,
942 "queue number out of range: %d, must be %d to %d\n",
943 txq_id, IWL49_FIRST_AMPDU_QUEUE,
944 IWL49_FIRST_AMPDU_QUEUE +
945 priv->cfg->base_params->num_of_ampdu_queues - 1);
946 return -EINVAL;
947 }
948
949 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
950
951 iwl_legacy_clear_bits_prph(priv,
952 IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
953
954 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
955 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
956 /* assumes that ssn_idx is valid (!= 0xFFF) */
957 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
958
959 iwl_legacy_clear_bits_prph(priv,
960 IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
961 iwl_txq_ctx_deactivate(priv, txq_id);
962 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
963
964 return 0;
965}
966
967int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
968 struct ieee80211_sta *sta, u16 tid)
969{
970 int tx_fifo_id, txq_id, sta_id, ssn;
971 struct iwl_tid_data *tid_data;
972 int write_ptr, read_ptr;
973 unsigned long flags;
974
975 tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
976 if (unlikely(tx_fifo_id < 0))
977 return tx_fifo_id;
978
979 sta_id = iwl_legacy_sta_id(sta);
980
981 if (sta_id == IWL_INVALID_STATION) {
982 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
983 return -ENXIO;
984 }
985
986 spin_lock_irqsave(&priv->sta_lock, flags);
987
988 tid_data = &priv->stations[sta_id].tid[tid];
989 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
990 txq_id = tid_data->agg.txq_id;
991
992 switch (priv->stations[sta_id].tid[tid].agg.state) {
993 case IWL_EMPTYING_HW_QUEUE_ADDBA:
994 /*
995 * This can happen if the peer stops aggregation
996 * again before we've had a chance to drain the
997 * queue we selected previously, i.e. before the
998 * session was really started completely.
999 */
1000 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1001 goto turn_off;
1002 case IWL_AGG_ON:
1003 break;
1004 default:
1005 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1006 }
1007
1008 write_ptr = priv->txq[txq_id].q.write_ptr;
1009 read_ptr = priv->txq[txq_id].q.read_ptr;
1010
1011 /* The queue is not empty */
1012 if (write_ptr != read_ptr) {
1013 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
1014 priv->stations[sta_id].tid[tid].agg.state =
1015 IWL_EMPTYING_HW_QUEUE_DELBA;
1016 spin_unlock_irqrestore(&priv->sta_lock, flags);
1017 return 0;
1018 }
1019
1020 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1021 turn_off:
1022 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1023
1024 /* do not restore/save irqs */
1025 spin_unlock(&priv->sta_lock);
1026 spin_lock(&priv->lock);
1027
1028 /*
1029 * The only reason this call can fail is a queue number out of range,
1030 * which can happen if the uCode is reloaded and all the station
1031 * information is lost. If it is outside the range, there is no need
1032 * to deactivate the uCode queue; just return "success" to allow
1033 * mac80211 to clean up its own data.
1034 */
1035 iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
1036 spin_unlock_irqrestore(&priv->lock, flags);
1037
1038 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1039
1040 return 0;
1041}
1042
1043int iwl4965_txq_check_empty(struct iwl_priv *priv,
1044 int sta_id, u8 tid, int txq_id)
1045{
1046 struct iwl_queue *q = &priv->txq[txq_id].q;
1047 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1048 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1049 struct iwl_rxon_context *ctx;
1050
1051 ctx = &priv->contexts[priv->stations[sta_id].ctxid];
1052
1053 lockdep_assert_held(&priv->sta_lock);
1054
1055 switch (priv->stations[sta_id].tid[tid].agg.state) {
1056 case IWL_EMPTYING_HW_QUEUE_DELBA:
1057 /* We are reclaiming the last packet of the */
1058 /* aggregated HW queue */
1059 if ((txq_id == tid_data->agg.txq_id) &&
1060 (q->read_ptr == q->write_ptr)) {
1061 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1062 int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
1063 IWL_DEBUG_HT(priv,
1064 "HW queue empty: continue DELBA flow\n");
1065 iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
1066 tid_data->agg.state = IWL_AGG_OFF;
1067 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1068 }
1069 break;
1070 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1071 /* We are reclaiming the last packet of the queue */
1072 if (tid_data->tfds_in_queue == 0) {
1073 IWL_DEBUG_HT(priv,
1074 "HW queue empty: continue ADDBA flow\n");
1075 tid_data->agg.state = IWL_AGG_ON;
1076 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1077 }
1078 break;
1079 }
1080
1081 return 0;
1082}
1083
1084static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
1085 struct iwl_rxon_context *ctx,
1086 const u8 *addr1)
1087{
1088 struct ieee80211_sta *sta;
1089 struct iwl_station_priv *sta_priv;
1090
1091 rcu_read_lock();
1092 sta = ieee80211_find_sta(ctx->vif, addr1);
1093 if (sta) {
1094 sta_priv = (void *)sta->drv_priv;
1095 /* avoid atomic ops if this isn't a client */
1096 if (sta_priv->client &&
1097 atomic_dec_return(&sta_priv->pending_frames) == 0)
1098 ieee80211_sta_block_awake(priv->hw, sta, false);
1099 }
1100 rcu_read_unlock();
1101}
1102
1103static void
1104iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1105 bool is_agg)
1106{
1107 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1108
1109 if (!is_agg)
1110 iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1111
1112 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1113}
1114
1115int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1116{
1117 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1118 struct iwl_queue *q = &txq->q;
1119 struct iwl_tx_info *tx_info;
1120 int nfreed = 0;
1121 struct ieee80211_hdr *hdr;
1122
1123 if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
1124 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1125 "is out of range [0-%d] %d %d.\n", txq_id,
1126 index, q->n_bd, q->write_ptr, q->read_ptr);
1127 return 0;
1128 }
1129
1130 for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
1131 q->read_ptr != index;
1132 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1133
1134 tx_info = &txq->txb[txq->q.read_ptr];
1135
1136 if (WARN_ON_ONCE(tx_info->skb == NULL))
1137 continue;
1138
1139 hdr = (struct ieee80211_hdr *)tx_info->skb->data;
1140 if (ieee80211_is_data_qos(hdr->frame_control))
1141 nfreed++;
1142
1143 iwl4965_tx_status(priv, tx_info,
1144 txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
1145 tx_info->skb = NULL;
1146
1147 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1148 }
1149 return nfreed;
1150}
1151
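/*
 * Editor's illustrative sketch (not part of the original patch): the
 * circular-buffer walk used by iwl4965_tx_queue_reclaim() above.
 * Starting one slot past the reported index, the read pointer is
 * advanced (with wrap-around) until it reaches that point, so the
 * slots read_ptr .. index inclusive are reclaimed.  The wrap helper is
 * a simplified stand-in for iwl_legacy_queue_inc_wrap().
 */
static int sketch_queue_inc_wrap(int idx, int n_bd)
{
	return ++idx < n_bd ? idx : 0;
}

/* Returns how many slots lie between read_ptr and index, inclusive. */
static int sketch_reclaim_count(int read_ptr, int index, int n_bd)
{
	int freed = 0;

	for (index = sketch_queue_inc_wrap(index, n_bd);
	     read_ptr != index;
	     read_ptr = sketch_queue_inc_wrap(read_ptr, n_bd))
		freed++;

	return freed;
}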
1152/**
1153 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
1154 *
1155 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1156 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1157 */
1158static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1159 struct iwl_ht_agg *agg,
1160 struct iwl_compressed_ba_resp *ba_resp)
1161
1162{
1163 int i, sh, ack;
1164 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1165 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1166 int successes = 0;
1167 struct ieee80211_tx_info *info;
1168 u64 bitmap, sent_bitmap;
1169
1170 if (unlikely(!agg->wait_for_ba)) {
1171 if (unlikely(ba_resp->bitmap))
1172 IWL_ERR(priv, "Received BA when not expected\n");
1173 return -EINVAL;
1174 }
1175
1176 /* Mark that the expected block-ack response arrived */
1177 agg->wait_for_ba = 0;
1178 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
1179 ba_resp->seq_ctl);
1180
1181 /* Calculate shift to align block-ack bits with our Tx window bits */
1182 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
1183 if (sh < 0) /* window index wrapped around 0x100; realign */
1184 sh += 0x100;
1185
1186 if (agg->frame_count > (64 - sh)) {
1187 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1188 return -1;
1189 }
1190
1191 /* Align the block-ack bitmap with the start of our Tx window */
1192 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1193
1194 /* check for success or failure according to the
1195 * transmitted bitmap and block-ack bitmap */
1196 sent_bitmap = bitmap & agg->bitmap;
1197
1198 /* For each frame attempted in aggregation,
1199 * update driver's record of tx frame's status. */
1200 i = 0;
1201 while (sent_bitmap) {
1202 ack = sent_bitmap & 1ULL;
1203 successes += ack;
1204 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1205 ack ? "ACK" : "NACK", i,
1206 (agg->start_idx + i) & 0xff,
1207 agg->start_idx + i);
1208 sent_bitmap >>= 1;
1209 ++i;
1210 }
1211
1212 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
1213 (unsigned long long)bitmap);
1214
1215 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
1216 memset(&info->status, 0, sizeof(info->status));
1217 info->flags |= IEEE80211_TX_STAT_ACK;
1218 info->flags |= IEEE80211_TX_STAT_AMPDU;
1219 info->status.ampdu_ack_len = successes;
1220 info->status.ampdu_len = agg->frame_count;
1221 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1222
1223 return 0;
1224}
1225
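/*
 * Editor's illustrative sketch (not part of the original patch):
 * counting acknowledged frames from a compressed block-ack bitmap, as
 * in iwl4965_tx_status_reply_compressed_ba() above.  The BA bitmap is
 * shifted so bit 0 lines up with the first frame of the driver's Tx
 * window, masked with the bitmap of frames actually sent, and the
 * remaining set bits are the ACKed frames.
 */
#include <stdint.h>

static unsigned int sketch_count_ba_acks(uint64_t ba_bitmap, int shift,
					 uint64_t sent_mask)
{
	uint64_t acked;
	unsigned int successes = 0;

	if (shift < 0)			/* window index wrapped around 256 */
		shift += 0x100;
	if (shift >= 64)		/* the real code bails out here */
		return 0;

	acked = (ba_bitmap >> shift) & sent_mask;
	while (acked) {
		successes += (unsigned int)(acked & 1ULL);
		acked >>= 1;
	}
	return successes;
}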
1226/**
1227 * iwl4965_hwrate_to_tx_control - translate uCode rate_n_flags into mac80211 tx status values
1228 */
1229void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1230 struct ieee80211_tx_info *info)
1231{
1232 struct ieee80211_tx_rate *r = &info->control.rates[0];
1233
1234 info->antenna_sel_tx =
1235 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1236 if (rate_n_flags & RATE_MCS_HT_MSK)
1237 r->flags |= IEEE80211_TX_RC_MCS;
1238 if (rate_n_flags & RATE_MCS_GF_MSK)
1239 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1240 if (rate_n_flags & RATE_MCS_HT40_MSK)
1241 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1242 if (rate_n_flags & RATE_MCS_DUP_MSK)
1243 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1244 if (rate_n_flags & RATE_MCS_SGI_MSK)
1245 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1246 r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1247}
1248
1249/**
1250 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1251 *
1252 * Handles block-acknowledge notification from device, which reports success
1253 * of frames sent via aggregation.
1254 */
1255void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
1256 struct iwl_rx_mem_buffer *rxb)
1257{
1258 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1259 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1260 struct iwl_tx_queue *txq = NULL;
1261 struct iwl_ht_agg *agg;
1262 int index;
1263 int sta_id;
1264 int tid;
1265 unsigned long flags;
1266
1267 /* "flow" corresponds to Tx queue */
1268 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1269
1270 /* "ssn" is start of block-ack Tx window, corresponds to index
1271 * (in Tx queue's circular buffer) of first TFD/frame in window */
1272 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1273
1274 if (scd_flow >= priv->hw_params.max_txq_num) {
1275 IWL_ERR(priv,
1276 "BUG_ON scd_flow is bigger than number of queues\n");
1277 return;
1278 }
1279
1280 txq = &priv->txq[scd_flow];
1281 sta_id = ba_resp->sta_id;
1282 tid = ba_resp->tid;
1283 agg = &priv->stations[sta_id].tid[tid].agg;
1284 if (unlikely(agg->txq_id != scd_flow)) {
1285 /*
1286 * FIXME: this is a uCode bug which needs to be addressed;
1287 * log the information and return for now.
1288 * Since it can happen very often, and in order not to flood
1289 * the syslog, the logging is not enabled by default.
1290 */
1291 IWL_DEBUG_TX_REPLY(priv,
1292 "BA scd_flow %d does not match txq_id %d\n",
1293 scd_flow, agg->txq_id);
1294 return;
1295 }
1296
1297 /* Find index just before block-ack window */
1298 index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1299
1300 spin_lock_irqsave(&priv->sta_lock, flags);
1301
1302 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1303 "sta_id = %d\n",
1304 agg->wait_for_ba,
1305 (u8 *) &ba_resp->sta_addr_lo32,
1306 ba_resp->sta_id);
1307 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
1308 "scd_flow = "
1309 "%d, scd_ssn = %d\n",
1310 ba_resp->tid,
1311 ba_resp->seq_ctl,
1312 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1313 ba_resp->scd_flow,
1314 ba_resp->scd_ssn);
1315 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
1316 agg->start_idx,
1317 (unsigned long long)agg->bitmap);
1318
1319 /* Update driver's record of ACK vs. not for each frame in window */
1320 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1321
1322 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1323 * block-ack window (we assume that they've been successfully
1324 * transmitted ... if not, it's too late anyway). */
1325 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1326 /* calculate mac80211 ampdu sw queue to wake */
1327 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
1328 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
1329
1330 if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
1331 priv->mac80211_registered &&
1332 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1333 iwl_legacy_wake_queue(priv, txq);
1334
1335 iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
1336 }
1337
1338 spin_unlock_irqrestore(&priv->sta_lock, flags);
1339}
1340
1341#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1342const char *iwl4965_get_tx_fail_reason(u32 status)
1343{
1344#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1345#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1346
1347 switch (status & TX_STATUS_MSK) {
1348 case TX_STATUS_SUCCESS:
1349 return "SUCCESS";
1350 TX_STATUS_POSTPONE(DELAY);
1351 TX_STATUS_POSTPONE(FEW_BYTES);
1352 TX_STATUS_POSTPONE(QUIET_PERIOD);
1353 TX_STATUS_POSTPONE(CALC_TTAK);
1354 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1355 TX_STATUS_FAIL(SHORT_LIMIT);
1356 TX_STATUS_FAIL(LONG_LIMIT);
1357 TX_STATUS_FAIL(FIFO_UNDERRUN);
1358 TX_STATUS_FAIL(DRAIN_FLOW);
1359 TX_STATUS_FAIL(RFKILL_FLUSH);
1360 TX_STATUS_FAIL(LIFE_EXPIRE);
1361 TX_STATUS_FAIL(DEST_PS);
1362 TX_STATUS_FAIL(HOST_ABORTED);
1363 TX_STATUS_FAIL(BT_RETRY);
1364 TX_STATUS_FAIL(STA_INVALID);
1365 TX_STATUS_FAIL(FRAG_DROPPED);
1366 TX_STATUS_FAIL(TID_DISABLE);
1367 TX_STATUS_FAIL(FIFO_FLUSHED);
1368 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
1369 TX_STATUS_FAIL(PASSIVE_NO_RX);
1370 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
1371 }
1372
1373 return "UNKNOWN";
1374
1375#undef TX_STATUS_FAIL
1376#undef TX_STATUS_POSTPONE
1377}
1378#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
new file mode 100644
index 00000000000..001d148feb9
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
@@ -0,0 +1,166 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-4965-calib.h"
42
43#define IWL_AC_UNSET -1
44
45/**
46 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
47 * using sample data 100 bytes apart. If these sample points are good,
48 * it's a pretty good bet that everything between them is good, too.
49 */
50static int
51iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
52{
53 u32 val;
54 int ret = 0;
55 u32 errcnt = 0;
56 u32 i;
57
58 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
59
60 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
61 /* read data comes through single port, auto-incr addr */
62 /* NOTE: Use the debugless read so we don't flood kernel log
63 * if IWL_DL_IO is set */
64 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
65 i + IWL4965_RTC_INST_LOWER_BOUND);
66 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
67 if (val != le32_to_cpu(*image)) {
68 ret = -EIO;
69 errcnt++;
70 if (errcnt >= 3)
71 break;
72 }
73 }
74
75 return ret;
76}
77
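/*
 * Editor's illustrative sketch (not part of the original patch): the
 * sparse-compare pattern used by iwl4965_verify_inst_sparse() above.
 * One 32-bit word is checked every 100 bytes and the scan gives up
 * after a few mismatches.  read_word() stands in for the device's
 * auto-incrementing target-memory read; it is an assumed callback,
 * not a real iwlegacy interface.
 */
#include <stddef.h>
#include <stdint.h>

static int sketch_verify_sparse(const uint32_t *image, size_t len,
				uint32_t (*read_word)(size_t byte_offset))
{
	size_t i;
	unsigned int errcnt = 0;

	/* advance host pointer by 100 / 4 = 25 words per 100-byte step */
	for (i = 0; i < len; i += 100, image += 100 / sizeof(uint32_t)) {
		if (read_word(i) != *image) {
			if (++errcnt >= 3)
				break;	/* image is almost certainly bad */
		}
	}

	return errcnt ? -1 : 0;
}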
78/**
79 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
80 * looking at all data.
81 */
82static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
83 u32 len)
84{
85 u32 val;
86 u32 save_len = len;
87 int ret = 0;
88 u32 errcnt;
89
90 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
91
92 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
93 IWL4965_RTC_INST_LOWER_BOUND);
94
95 errcnt = 0;
96 for (; len > 0; len -= sizeof(u32), image++) {
97 /* read data comes through single port, auto-incr addr */
98 /* NOTE: Use the debugless read so we don't flood kernel log
99 * if IWL_DL_IO is set */
100 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
101 if (val != le32_to_cpu(*image)) {
102 IWL_ERR(priv, "uCode INST section is invalid at "
103 "offset 0x%x, is 0x%x, s/b 0x%x\n",
104 save_len - len, val, le32_to_cpu(*image));
105 ret = -EIO;
106 errcnt++;
107 if (errcnt >= 20)
108 break;
109 }
110 }
111
112 if (!errcnt)
113 IWL_DEBUG_INFO(priv,
114 "ucode image in INSTRUCTION memory is good\n");
115
116 return ret;
117}
118
119/**
120 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
121 * and verify its contents
122 */
123int iwl4965_verify_ucode(struct iwl_priv *priv)
124{
125 __le32 *image;
126 u32 len;
127 int ret;
128
129 /* Try bootstrap */
130 image = (__le32 *)priv->ucode_boot.v_addr;
131 len = priv->ucode_boot.len;
132 ret = iwl4965_verify_inst_sparse(priv, image, len);
133 if (!ret) {
134 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
135 return 0;
136 }
137
138 /* Try initialize */
139 image = (__le32 *)priv->ucode_init.v_addr;
140 len = priv->ucode_init.len;
141 ret = iwl4965_verify_inst_sparse(priv, image, len);
142 if (!ret) {
143 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
144 return 0;
145 }
146
147 /* Try runtime/protocol */
148 image = (__le32 *)priv->ucode_code.v_addr;
149 len = priv->ucode_code.len;
150 ret = iwl4965_verify_inst_sparse(priv, image, len);
151 if (!ret) {
152 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
153 return 0;
154 }
155
156 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
157
158 /* Since nothing seems to match, show first several data entries in
159 * instruction SRAM, so maybe visual inspection will give a clue.
160 * Selection of bootstrap image (vs. other images) is arbitrary. */
161 image = (__le32 *)priv->ucode_boot.v_addr;
162 len = priv->ucode_boot.len;
163 ret = iwl4965_verify_inst_full(priv, image, len);
164
165 return ret;
166}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
new file mode 100644
index 00000000000..ecdc6e55742
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -0,0 +1,2184 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40
41#include "iwl-eeprom.h"
42#include "iwl-dev.h"
43#include "iwl-core.h"
44#include "iwl-io.h"
45#include "iwl-helpers.h"
46#include "iwl-4965-calib.h"
47#include "iwl-sta.h"
48#include "iwl-4965-led.h"
49#include "iwl-4965.h"
50#include "iwl-4965-debugfs.h"
51
52static int iwl4965_send_tx_power(struct iwl_priv *priv);
53static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
54
55/* Highest firmware API version supported */
56#define IWL4965_UCODE_API_MAX 2
57
58/* Lowest firmware API version supported */
59#define IWL4965_UCODE_API_MIN 2
60
61#define IWL4965_FW_PRE "iwlwifi-4965-"
62#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
63#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
64
65/* check contents of special bootstrap uCode SRAM */
66static int iwl4965_verify_bsm(struct iwl_priv *priv)
67{
68 __le32 *image = priv->ucode_boot.v_addr;
69 u32 len = priv->ucode_boot.len;
70 u32 reg;
71 u32 val;
72
73 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
74
75 /* verify BSM SRAM contents */
76 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
77 for (reg = BSM_SRAM_LOWER_BOUND;
78 reg < BSM_SRAM_LOWER_BOUND + len;
79 reg += sizeof(u32), image++) {
80 val = iwl_legacy_read_prph(priv, reg);
81 if (val != le32_to_cpu(*image)) {
82 IWL_ERR(priv, "BSM uCode verification failed at "
83 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
84 BSM_SRAM_LOWER_BOUND,
85 reg - BSM_SRAM_LOWER_BOUND, len,
86 val, le32_to_cpu(*image));
87 return -EIO;
88 }
89 }
90
91 IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
92
93 return 0;
94}
95
96/**
97 * iwl4965_load_bsm - Load bootstrap instructions
98 *
99 * BSM operation:
100 *
101 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
102 * in special SRAM that does not power down during RFKILL. When powering back
103 * up after power-saving sleeps (or during initial uCode load), the BSM loads
104 * the bootstrap program into the on-board processor, and starts it.
105 *
106 * The bootstrap program loads (via DMA) instructions and data for a new
107 * program from host DRAM locations indicated by the host driver in the
108 * BSM_DRAM_* registers. Once the new program is loaded, it starts
109 * automatically.
110 *
111 * When initializing the NIC, the host driver points the BSM to the
112 * "initialize" uCode image. This uCode sets up some internal data, then
113 * notifies host via "initialize alive" that it is complete.
114 *
115 * The host then replaces the BSM_DRAM_* pointer values to point to the
116 * normal runtime uCode instructions and a backup uCode data cache buffer
117 * (filled initially with starting data values for the on-board processor),
118 * then triggers the "initialize" uCode to load and launch the runtime uCode,
119 * which begins normal operation.
120 *
121 * When doing a power-save shutdown, runtime uCode saves data SRAM into
122 * the backup data cache in DRAM before SRAM is powered down.
123 *
124 * When powering back up, the BSM loads the bootstrap program. This reloads
125 * the runtime uCode instructions and the backup data cache into SRAM,
126 * and re-launches the runtime uCode from where it left off.
127 */
128static int iwl4965_load_bsm(struct iwl_priv *priv)
129{
130 __le32 *image = priv->ucode_boot.v_addr;
131 u32 len = priv->ucode_boot.len;
132 dma_addr_t pinst;
133 dma_addr_t pdata;
134 u32 inst_len;
135 u32 data_len;
136 int i;
137 u32 done;
138 u32 reg_offset;
139 int ret;
140
141 IWL_DEBUG_INFO(priv, "Begin load bsm\n");
142
143 priv->ucode_type = UCODE_RT;
144
145 /* make sure bootstrap program is no larger than BSM's SRAM size */
146 if (len > IWL49_MAX_BSM_SIZE)
147 return -EINVAL;
148
149 /* Tell bootstrap uCode where to find the "Initialize" uCode
150 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
151 * NOTE: iwl_init_alive_start() will replace these values,
152 * after the "initialize" uCode has run, to point to
153 * runtime/protocol instructions and backup data cache.
154 */
155 pinst = priv->ucode_init.p_addr >> 4;
156 pdata = priv->ucode_init_data.p_addr >> 4;
157 inst_len = priv->ucode_init.len;
158 data_len = priv->ucode_init_data.len;
159
160 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
161 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
162 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
163 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
164
165 /* Fill BSM memory with bootstrap instructions */
166 for (reg_offset = BSM_SRAM_LOWER_BOUND;
167 reg_offset < BSM_SRAM_LOWER_BOUND + len;
168 reg_offset += sizeof(u32), image++)
169 _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
170
171 ret = iwl4965_verify_bsm(priv);
172 if (ret)
173 return ret;
174
175 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
176 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
177 iwl_legacy_write_prph(priv,
178 BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
179 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
180
181 /* Load bootstrap code into instruction SRAM now,
182 * to prepare to load "initialize" uCode */
183 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
184
185 /* Wait for load of bootstrap uCode to finish */
186 for (i = 0; i < 100; i++) {
187 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
188 if (!(done & BSM_WR_CTRL_REG_BIT_START))
189 break;
190 udelay(10);
191 }
192 if (i < 100)
193 IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
194 else {
195 IWL_ERR(priv, "BSM write did not complete!\n");
196 return -EIO;
197 }
198
199 /* Enable future boot loads whenever power management unit triggers it
200 * (e.g. when powering back up after power-save shutdown) */
201 iwl_legacy_write_prph(priv,
202 BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
203
204
205 return 0;
206}
207
208/**
209 * iwl4965_set_ucode_ptrs - Set uCode address location
210 *
211 * Tell initialization uCode where to find runtime uCode.
212 *
213 * BSM registers initially contain pointers to initialization uCode.
214 * We need to replace them to load runtime uCode inst and data,
215 * and to save runtime data when powering down.
216 */
217static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
218{
219 dma_addr_t pinst;
220 dma_addr_t pdata;
221 int ret = 0;
222
223 /* bits 35:4 for 4965 */
224 pinst = priv->ucode_code.p_addr >> 4;
225 pdata = priv->ucode_data_backup.p_addr >> 4;
226
227 /* Tell bootstrap uCode where to find image to load */
228 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
229 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
230 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
231 priv->ucode_data.len);
232
233 /* Inst byte count must be last to set up, bit 31 signals uCode
234 * that all new ptr/size info is in place */
235 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
236 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
237 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
238
239 return ret;
240}
241
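/*
 * Editor's illustrative sketch (not part of the original patch): how a
 * host DMA address is squeezed into a 32-bit BSM_DRAM_*_PTR register,
 * as in iwl4965_load_bsm()/iwl4965_set_ucode_ptrs() above.  The 4965
 * takes physical address bits 35:4, so buffers must be 16-byte aligned
 * and live below 64 GiB.
 */
#include <stdint.h>

static uint32_t sketch_encode_bsm_ptr(uint64_t phys_addr)
{
	/* bits 35:4 of the address become bits 31:0 of the register */
	return (uint32_t)(phys_addr >> 4);
}

static uint64_t sketch_decode_bsm_ptr(uint32_t reg)
{
	return (uint64_t)reg << 4;
}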
242/**
243 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
244 *
245 * Called after REPLY_ALIVE notification received from "initialize" uCode.
246 *
247 * The 4965 "initialize" ALIVE reply contains calibration data for:
248 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
249 * (3945 does not contain this data).
250 *
251 * Tell "initialize" uCode to go ahead and load the runtime uCode.
252*/
253static void iwl4965_init_alive_start(struct iwl_priv *priv)
254{
255 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
256 * This is a paranoid check, because we would not have gotten the
257 * "initialize" alive if code weren't properly loaded. */
258 if (iwl4965_verify_ucode(priv)) {
259 /* Runtime instruction load was bad;
260 * take it all the way back down so we can try again */
261 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
262 goto restart;
263 }
264
265 /* Calculate temperature */
266 priv->temperature = iwl4965_hw_get_temperature(priv);
267
268 /* Send pointers to protocol/runtime uCode image ... init code will
269 * load and launch runtime uCode, which will send us another "Alive"
270 * notification. */
271 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
272 if (iwl4965_set_ucode_ptrs(priv)) {
273 /* Runtime instruction load won't happen;
274 * take it all the way back down so we can try again */
275 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
276 goto restart;
277 }
278 return;
279
280restart:
281 queue_work(priv->workqueue, &priv->restart);
282}
283
284static bool iw4965_is_ht40_channel(__le32 rxon_flags)
285{
286 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
287 >> RXON_FLG_CHANNEL_MODE_POS;
288 return ((chan_mod == CHANNEL_MODE_PURE_40) ||
289 (chan_mod == CHANNEL_MODE_MIXED));
290}
291
292static void iwl4965_nic_config(struct iwl_priv *priv)
293{
294 unsigned long flags;
295 u16 radio_cfg;
296
297 spin_lock_irqsave(&priv->lock, flags);
298
299 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
300
301 /* write radio config values to register */
302 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
303 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
304 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
305 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
306 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
307
308 /* set CSR_HW_CONFIG_REG for uCode use */
309 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
310 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
311 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
312
313 priv->calib_info = (struct iwl_eeprom_calib_info *)
314 iwl_legacy_eeprom_query_addr(priv,
315 EEPROM_4965_CALIB_TXPOWER_OFFSET);
316
317 spin_unlock_irqrestore(&priv->lock, flags);
318}
319
320/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
321 * Called after every association, but this runs only once!
322 * ... once chain noise is calibrated the first time, it's good forever. */
323static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
324{
325 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
326
327 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
328 iwl_legacy_is_any_associated(priv)) {
329 struct iwl_calib_diff_gain_cmd cmd;
330
331 /* clear data for chain noise calibration algorithm */
332 data->chain_noise_a = 0;
333 data->chain_noise_b = 0;
334 data->chain_noise_c = 0;
335 data->chain_signal_a = 0;
336 data->chain_signal_b = 0;
337 data->chain_signal_c = 0;
338 data->beacon_count = 0;
339
340 memset(&cmd, 0, sizeof(cmd));
341 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
342 cmd.diff_gain_a = 0;
343 cmd.diff_gain_b = 0;
344 cmd.diff_gain_c = 0;
345 if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
346 sizeof(cmd), &cmd))
347 IWL_ERR(priv,
348 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
349 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
350 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
351 }
352}
353
354static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
355 .min_nrg_cck = 97,
356 .max_nrg_cck = 0, /* not used, set to 0 */
357
358 .auto_corr_min_ofdm = 85,
359 .auto_corr_min_ofdm_mrc = 170,
360 .auto_corr_min_ofdm_x1 = 105,
361 .auto_corr_min_ofdm_mrc_x1 = 220,
362
363 .auto_corr_max_ofdm = 120,
364 .auto_corr_max_ofdm_mrc = 210,
365 .auto_corr_max_ofdm_x1 = 140,
366 .auto_corr_max_ofdm_mrc_x1 = 270,
367
368 .auto_corr_min_cck = 125,
369 .auto_corr_max_cck = 200,
370 .auto_corr_min_cck_mrc = 200,
371 .auto_corr_max_cck_mrc = 400,
372
373 .nrg_th_cck = 100,
374 .nrg_th_ofdm = 100,
375
376 .barker_corr_th_min = 190,
377 .barker_corr_th_min_mrc = 390,
378 .nrg_th_cca = 62,
379};
380
381static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
382{
383 /* want Kelvin */
384 priv->hw_params.ct_kill_threshold =
385 CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
386}
387
388/**
389 * iwl4965_hw_set_hw_params
390 *
391 * Called when initializing driver
392 */
393static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
394{
395 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
396 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
397 priv->cfg->base_params->num_of_queues =
398 priv->cfg->mod_params->num_of_queues;
399
400 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
401 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
402 priv->hw_params.scd_bc_tbls_size =
403 priv->cfg->base_params->num_of_queues *
404 sizeof(struct iwl4965_scd_bc_tbl);
405 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
406 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
407 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
408 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
409 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
410 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
411 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
412
413 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
414
415 priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
416 priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
417 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
418 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
419
420 iwl4965_set_ct_threshold(priv);
421
422 priv->hw_params.sens = &iwl4965_sensitivity;
423 priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
424
425 return 0;
426}
427
428static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
429{
430 s32 sign = 1;
431
432 if (num < 0) {
433 sign = -sign;
434 num = -num;
435 }
436 if (denom < 0) {
437 sign = -sign;
438 denom = -denom;
439 }
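	/* Round num/denom to the nearest integer (half rounds away from
	 * zero): (num * 2 + denom) / (denom * 2) == (num + denom/2) / denom,
	 * with the sign re-applied afterwards. */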
441 *res = ((num * 2 + denom) / (denom * 2)) * sign;
442
443 return 1;
444}
445
446/**
447 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
448 *
449 * Determines power supply voltage compensation for txpower calculations.
450 * Returns number of 1/2-dB steps to subtract from gain table index,
451 * to compensate for difference between power supply voltage during
452 * factory measurements, vs. current power supply voltage.
453 *
454 * Voltage indication is higher for lower voltage.
455 * Lower voltage requires more gain (lower gain table index).
456 */
457static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
458 s32 current_voltage)
459{
460 s32 comp = 0;
461
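	/* One compensation step per TX_POWER_IWL_VOLTAGE_CODES_PER_03V of
	 * indicator difference (rounded to nearest); doubled when the current
	 * reading exceeds the factory value, and discarded (set to 0) if the
	 * result falls outside +/-2 half-dB steps. */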
462 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
463 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
464 return 0;
465
466 iwl4965_math_div_round(current_voltage - eeprom_voltage,
467 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
468
469 if (current_voltage > eeprom_voltage)
470 comp *= 2;
471 if ((comp < -2) || (comp > 2))
472 comp = 0;
473
474 return comp;
475}
476
477static s32 iwl4965_get_tx_atten_grp(u16 channel)
478{
479 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
480 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
481 return CALIB_CH_GROUP_5;
482
483 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
484 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
485 return CALIB_CH_GROUP_1;
486
487 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
488 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
489 return CALIB_CH_GROUP_2;
490
491 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
492 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
493 return CALIB_CH_GROUP_3;
494
495 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
496 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
497 return CALIB_CH_GROUP_4;
498
499 return -EINVAL;
500}
501
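/* Find which factory-calibration sub-band contains the given channel;
 * returns EEPROM_TX_POWER_BANDS if no sub-band matches (callers must
 * check for this). */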
502static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
503{
504 s32 b = -1;
505
506 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
507 if (priv->calib_info->band_info[b].ch_from == 0)
508 continue;
509
510 if ((channel >= priv->calib_info->band_info[b].ch_from)
511 && (channel <= priv->calib_info->band_info[b].ch_to))
512 break;
513 }
514
515 return b;
516}
517
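/* Linear interpolation between the two factory-measured points (x1, y1)
 * and (x2, y2), rounded to the nearest integer:
 *   y = y2 + (y1 - y2) * (x2 - x) / (x2 - x1)
 */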
518static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
519{
520 s32 val;
521
522 if (x2 == x1)
523 return y1;
524 else {
525 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
526 return val + y2;
527 }
528}
529
530/**
531 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
532 *
533 * Interpolates factory measurements from the two sample channels within a
534 * sub-band, to apply to channel of interest. Interpolation is proportional to
535 * differences in channel frequencies, which is proportional to differences
536 * in channel number.
537 */
538static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
539 struct iwl_eeprom_calib_ch_info *chan_info)
540{
541 s32 s = -1;
542 u32 c;
543 u32 m;
544 const struct iwl_eeprom_calib_measure *m1;
545 const struct iwl_eeprom_calib_measure *m2;
546 struct iwl_eeprom_calib_measure *omeas;
547 u32 ch_i1;
548 u32 ch_i2;
549
550 s = iwl4965_get_sub_band(priv, channel);
551 if (s >= EEPROM_TX_POWER_BANDS) {
552 IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
553 return -1;
554 }
555
556 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
557 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
558 chan_info->ch_num = (u8) channel;
559
560 IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
561 channel, s, ch_i1, ch_i2);
562
563 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
564 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
565 m1 = &(priv->calib_info->band_info[s].ch1.
566 measurements[c][m]);
567 m2 = &(priv->calib_info->band_info[s].ch2.
568 measurements[c][m]);
569 omeas = &(chan_info->measurements[c][m]);
570
571 omeas->actual_pow =
572 (u8) iwl4965_interpolate_value(channel, ch_i1,
573 m1->actual_pow,
574 ch_i2,
575 m2->actual_pow);
576 omeas->gain_idx =
577 (u8) iwl4965_interpolate_value(channel, ch_i1,
578 m1->gain_idx, ch_i2,
579 m2->gain_idx);
580 omeas->temperature =
581 (u8) iwl4965_interpolate_value(channel, ch_i1,
582 m1->temperature,
583 ch_i2,
584 m2->temperature);
585 omeas->pa_det =
586 (s8) iwl4965_interpolate_value(channel, ch_i1,
587 m1->pa_det, ch_i2,
588 m2->pa_det);
589
590 IWL_DEBUG_TXPOWER(priv,
591 "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
592 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
593 IWL_DEBUG_TXPOWER(priv,
594 "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
595 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
596 IWL_DEBUG_TXPOWER(priv,
597 "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
598 m1->pa_det, m2->pa_det, omeas->pa_det);
599 IWL_DEBUG_TXPOWER(priv,
600 "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
601 m1->temperature, m2->temperature,
602 omeas->temperature);
603 }
604 }
605
606 return 0;
607}
608
609/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
610 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
611static s32 back_off_table[] = {
612 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
613 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
614 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
615 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
616 10 /* CCK */
617};
618
619/* Thermal compensation values for txpower for various frequency ranges ...
620 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
621static struct iwl4965_txpower_comp_entry {
622 s32 degrees_per_05db_a;
623 s32 degrees_per_05db_a_denom;
624} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
625 {9, 2}, /* group 0 5.2, ch 34-43 */
626 {4, 1}, /* group 1 5.2, ch 44-70 */
627 {4, 1}, /* group 2 5.2, ch 71-124 */
628 {4, 1}, /* group 3 5.2, ch 125-200 */
629 {3, 1} /* group 4 2.4, ch all */
630};
631
632static s32 get_min_power_index(s32 rate_power_index, u32 band)
633{
634 if (!band) {
635 if ((rate_power_index & 7) <= 4)
636 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
637 }
638 return MIN_TX_GAIN_INDEX;
639}
640
641struct gain_entry {
642 u8 dsp;
643 u8 radio;
644};
645
646static const struct gain_entry gain_table[2][108] = {
647 /* 5.2GHz power gain index table */
648 {
649 {123, 0x3F}, /* highest txpower */
650 {117, 0x3F},
651 {110, 0x3F},
652 {104, 0x3F},
653 {98, 0x3F},
654 {110, 0x3E},
655 {104, 0x3E},
656 {98, 0x3E},
657 {110, 0x3D},
658 {104, 0x3D},
659 {98, 0x3D},
660 {110, 0x3C},
661 {104, 0x3C},
662 {98, 0x3C},
663 {110, 0x3B},
664 {104, 0x3B},
665 {98, 0x3B},
666 {110, 0x3A},
667 {104, 0x3A},
668 {98, 0x3A},
669 {110, 0x39},
670 {104, 0x39},
671 {98, 0x39},
672 {110, 0x38},
673 {104, 0x38},
674 {98, 0x38},
675 {110, 0x37},
676 {104, 0x37},
677 {98, 0x37},
678 {110, 0x36},
679 {104, 0x36},
680 {98, 0x36},
681 {110, 0x35},
682 {104, 0x35},
683 {98, 0x35},
684 {110, 0x34},
685 {104, 0x34},
686 {98, 0x34},
687 {110, 0x33},
688 {104, 0x33},
689 {98, 0x33},
690 {110, 0x32},
691 {104, 0x32},
692 {98, 0x32},
693 {110, 0x31},
694 {104, 0x31},
695 {98, 0x31},
696 {110, 0x30},
697 {104, 0x30},
698 {98, 0x30},
699 {110, 0x25},
700 {104, 0x25},
701 {98, 0x25},
702 {110, 0x24},
703 {104, 0x24},
704 {98, 0x24},
705 {110, 0x23},
706 {104, 0x23},
707 {98, 0x23},
708 {110, 0x22},
709 {104, 0x18},
710 {98, 0x18},
711 {110, 0x17},
712 {104, 0x17},
713 {98, 0x17},
714 {110, 0x16},
715 {104, 0x16},
716 {98, 0x16},
717 {110, 0x15},
718 {104, 0x15},
719 {98, 0x15},
720 {110, 0x14},
721 {104, 0x14},
722 {98, 0x14},
723 {110, 0x13},
724 {104, 0x13},
725 {98, 0x13},
726 {110, 0x12},
727 {104, 0x08},
728 {98, 0x08},
729 {110, 0x07},
730 {104, 0x07},
731 {98, 0x07},
732 {110, 0x06},
733 {104, 0x06},
734 {98, 0x06},
735 {110, 0x05},
736 {104, 0x05},
737 {98, 0x05},
738 {110, 0x04},
739 {104, 0x04},
740 {98, 0x04},
741 {110, 0x03},
742 {104, 0x03},
743 {98, 0x03},
744 {110, 0x02},
745 {104, 0x02},
746 {98, 0x02},
747 {110, 0x01},
748 {104, 0x01},
749 {98, 0x01},
750 {110, 0x00},
751 {104, 0x00},
752 {98, 0x00},
753 {93, 0x00},
754 {88, 0x00},
755 {83, 0x00},
756 {78, 0x00},
757 },
758 /* 2.4GHz power gain index table */
759 {
760 {110, 0x3f}, /* highest txpower */
761 {104, 0x3f},
762 {98, 0x3f},
763 {110, 0x3e},
764 {104, 0x3e},
765 {98, 0x3e},
766 {110, 0x3d},
767 {104, 0x3d},
768 {98, 0x3d},
769 {110, 0x3c},
770 {104, 0x3c},
771 {98, 0x3c},
772 {110, 0x3b},
773 {104, 0x3b},
774 {98, 0x3b},
775 {110, 0x3a},
776 {104, 0x3a},
777 {98, 0x3a},
778 {110, 0x39},
779 {104, 0x39},
780 {98, 0x39},
781 {110, 0x38},
782 {104, 0x38},
783 {98, 0x38},
784 {110, 0x37},
785 {104, 0x37},
786 {98, 0x37},
787 {110, 0x36},
788 {104, 0x36},
789 {98, 0x36},
790 {110, 0x35},
791 {104, 0x35},
792 {98, 0x35},
793 {110, 0x34},
794 {104, 0x34},
795 {98, 0x34},
796 {110, 0x33},
797 {104, 0x33},
798 {98, 0x33},
799 {110, 0x32},
800 {104, 0x32},
801 {98, 0x32},
802 {110, 0x31},
803 {104, 0x31},
804 {98, 0x31},
805 {110, 0x30},
806 {104, 0x30},
807 {98, 0x30},
808 {110, 0x6},
809 {104, 0x6},
810 {98, 0x6},
811 {110, 0x5},
812 {104, 0x5},
813 {98, 0x5},
814 {110, 0x4},
815 {104, 0x4},
816 {98, 0x4},
817 {110, 0x3},
818 {104, 0x3},
819 {98, 0x3},
820 {110, 0x2},
821 {104, 0x2},
822 {98, 0x2},
823 {110, 0x1},
824 {104, 0x1},
825 {98, 0x1},
826 {110, 0x0},
827 {104, 0x0},
828 {98, 0x0},
829 {97, 0},
830 {96, 0},
831 {95, 0},
832 {94, 0},
833 {93, 0},
834 {92, 0},
835 {91, 0},
836 {90, 0},
837 {89, 0},
838 {88, 0},
839 {87, 0},
840 {86, 0},
841 {85, 0},
842 {84, 0},
843 {83, 0},
844 {82, 0},
845 {81, 0},
846 {80, 0},
847 {79, 0},
848 {78, 0},
849 {77, 0},
850 {76, 0},
851 {75, 0},
852 {74, 0},
853 {73, 0},
854 {72, 0},
855 {71, 0},
856 {70, 0},
857 {69, 0},
858 {68, 0},
859 {67, 0},
860 {66, 0},
861 {65, 0},
862 {64, 0},
863 {63, 0},
864 {62, 0},
865 {61, 0},
866 {60, 0},
867 {59, 0},
868 }
869};
870
871static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
872 u8 is_ht40, u8 ctrl_chan_high,
873 struct iwl4965_tx_power_db *tx_power_tbl)
874{
875 u8 saturation_power;
876 s32 target_power;
877 s32 user_target_power;
878 s32 power_limit;
879 s32 current_temp;
880 s32 reg_limit;
881 s32 current_regulatory;
882 s32 txatten_grp = CALIB_CH_GROUP_MAX;
883 int i;
884 int c;
885 const struct iwl_channel_info *ch_info = NULL;
886 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
887 const struct iwl_eeprom_calib_measure *measurement;
888 s16 voltage;
889 s32 init_voltage;
890 s32 voltage_compensation;
891 s32 degrees_per_05db_num;
892 s32 degrees_per_05db_denom;
893 s32 factory_temp;
894 s32 temperature_comp[2];
895 s32 factory_gain_index[2];
896 s32 factory_actual_pwr[2];
897 s32 power_index;
898
899 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
900 * are used for indexing into txpower table) */
901 user_target_power = 2 * priv->tx_power_user_lmt;
902
903 /* Get current (RXON) channel, band, width */
904 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
905 is_ht40);
906
907 ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
908
909 if (!iwl_legacy_is_channel_valid(ch_info))
910 return -EINVAL;
911
912 /* get txatten group, used to select 1) thermal txpower adjustment
913 * and 2) mimo txpower balance between Tx chains. */
914 txatten_grp = iwl4965_get_tx_atten_grp(channel);
915 if (txatten_grp < 0) {
916 IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
917 channel);
918 return txatten_grp;
919 }
920
921 IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
922 channel, txatten_grp);
923
924 if (is_ht40) {
925 if (ctrl_chan_high)
926 channel -= 2;
927 else
928 channel += 2;
929 }
930
931 /* hardware txpower limits ...
932 * saturation (clipping distortion) txpowers are in half-dBm */
933 if (band)
934 saturation_power = priv->calib_info->saturation_power24;
935 else
936 saturation_power = priv->calib_info->saturation_power52;
937
938 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
939 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
940 if (band)
941 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
942 else
943 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
944 }
945
946 /* regulatory txpower limits ... reg_limit values are in half-dBm,
947 * max_power_avg values are in dBm, convert * 2 */
948 if (is_ht40)
949 reg_limit = ch_info->ht40_max_power_avg * 2;
950 else
951 reg_limit = ch_info->max_power_avg * 2;
952
953 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
954 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
955 if (band)
956 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
957 else
958 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
959 }
960
961 /* Interpolate txpower calibration values for this channel,
962 * based on factory calibration tests on spaced channels. */
963 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
964
965 /* calculate tx gain adjustment based on power supply voltage */
966 voltage = le16_to_cpu(priv->calib_info->voltage);
967 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
968 voltage_compensation =
969 iwl4965_get_voltage_compensation(voltage, init_voltage);
970
971 IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
972 init_voltage,
973 voltage, voltage_compensation);
974
975 /* get current temperature (Celsius) */
976 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
 977	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
978 current_temp = KELVIN_TO_CELSIUS(current_temp);
979
980 /* select thermal txpower adjustment params, based on channel group
981 * (same frequency group used for mimo txatten adjustment) */
982 degrees_per_05db_num =
983 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
984 degrees_per_05db_denom =
985 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
986
987 /* get per-chain txpower values from factory measurements */
988 for (c = 0; c < 2; c++) {
989 measurement = &ch_eeprom_info.measurements[c][1];
990
991 /* txgain adjustment (in half-dB steps) based on difference
992 * between factory and current temperature */
993 factory_temp = measurement->temperature;
994 iwl4965_math_div_round((current_temp - factory_temp) *
995 degrees_per_05db_denom,
996 degrees_per_05db_num,
997 &temperature_comp[c]);
998
999 factory_gain_index[c] = measurement->gain_idx;
1000 factory_actual_pwr[c] = measurement->actual_pow;
1001
1002 IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
1003 IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
1004 "curr tmp %d, comp %d steps\n",
1005 factory_temp, current_temp,
1006 temperature_comp[c]);
1007
1008 IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
1009 factory_gain_index[c],
1010 factory_actual_pwr[c]);
1011 }
1012
1013 /* for each of 33 bit-rates (including 1 for CCK) */
1014 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1015 u8 is_mimo_rate;
1016 union iwl4965_tx_power_dual_stream tx_power;
1017
1018 /* for mimo, reduce each chain's txpower by half
1019 * (3dB, 6 steps), so total output power is regulatory
1020 * compliant. */
1021 if (i & 0x8) {
1022 current_regulatory = reg_limit -
1023 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1024 is_mimo_rate = 1;
1025 } else {
1026 current_regulatory = reg_limit;
1027 is_mimo_rate = 0;
1028 }
1029
1030 /* find txpower limit, either hardware or regulatory */
1031 power_limit = saturation_power - back_off_table[i];
1032 if (power_limit > current_regulatory)
1033 power_limit = current_regulatory;
1034
1035 /* reduce user's txpower request if necessary
1036 * for this rate on this channel */
1037 target_power = user_target_power;
1038 if (target_power > power_limit)
1039 target_power = power_limit;
1040
1041 IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
1042 i, saturation_power - back_off_table[i],
1043 current_regulatory, user_target_power,
1044 target_power);
1045
1046 /* for each of 2 Tx chains (radio transmitters) */
1047 for (c = 0; c < 2; c++) {
1048 s32 atten_value;
1049
1050 if (is_mimo_rate)
1051 atten_value =
1052 (s32)le32_to_cpu(priv->card_alive_init.
1053 tx_atten[txatten_grp][c]);
1054 else
1055 atten_value = 0;
1056
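			/* Each term below is in half-dB steps: start from the
			 * factory gain index, lower the index (more gain) by
			 * however far the target exceeds the factory-measured
			 * power, then apply the temperature, voltage and
			 * (for MIMO) per-chain attenuation corrections. */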
1057 /* calculate index; higher index means lower txpower */
1058 power_index = (u8) (factory_gain_index[c] -
1059 (target_power -
1060 factory_actual_pwr[c]) -
1061 temperature_comp[c] -
1062 voltage_compensation +
1063 atten_value);
1064
1065/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
1066 power_index); */
1067
1068 if (power_index < get_min_power_index(i, band))
1069 power_index = get_min_power_index(i, band);
1070
1071 /* adjust 5 GHz index to support negative indexes */
1072 if (!band)
1073 power_index += 9;
1074
1075 /* CCK, rate 32, reduce txpower for CCK */
1076 if (i == POWER_TABLE_CCK_ENTRY)
1077 power_index +=
1078 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1079
1080 /* stay within the table! */
1081 if (power_index > 107) {
1082 IWL_WARN(priv, "txpower index %d > 107\n",
1083 power_index);
1084 power_index = 107;
1085 }
1086 if (power_index < 0) {
1087 IWL_WARN(priv, "txpower index %d < 0\n",
1088 power_index);
1089 power_index = 0;
1090 }
1091
1092 /* fill txpower command for this rate/chain */
1093 tx_power.s.radio_tx_gain[c] =
1094 gain_table[band][power_index].radio;
1095 tx_power.s.dsp_predis_atten[c] =
1096 gain_table[band][power_index].dsp;
1097
1098 IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
1099 "gain 0x%02x dsp %d\n",
1100 c, atten_value, power_index,
1101 tx_power.s.radio_tx_gain[c],
1102 tx_power.s.dsp_predis_atten[c]);
1103 } /* for each chain */
1104
1105 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1106
1107 } /* for each rate */
1108
1109 return 0;
1110}
1111
1112/**
1113 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
1114 *
1115 * Uses the active RXON for channel, band, and characteristics (ht40, high)
1116 * The power limit is taken from priv->tx_power_user_lmt.
1117 */
1118static int iwl4965_send_tx_power(struct iwl_priv *priv)
1119{
1120 struct iwl4965_txpowertable_cmd cmd = { 0 };
1121 int ret;
1122 u8 band = 0;
1123 bool is_ht40 = false;
1124 u8 ctrl_chan_high = 0;
1125 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1126
1127 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1128 "TX Power requested while scanning!\n"))
1129 return -EAGAIN;
1130
1131 band = priv->band == IEEE80211_BAND_2GHZ;
1132
1133 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1134
1135 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1136 ctrl_chan_high = 1;
1137
1138 cmd.band = band;
1139 cmd.channel = ctx->active.channel;
1140
1141 ret = iwl4965_fill_txpower_tbl(priv, band,
1142 le16_to_cpu(ctx->active.channel),
1143 is_ht40, ctrl_chan_high, &cmd.tx_power);
1144 if (ret)
1145 goto out;
1146
1147 ret = iwl_legacy_send_cmd_pdu(priv,
1148 REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1149
1150out:
1151 return ret;
1152}
1153
1154static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1155 struct iwl_rxon_context *ctx)
1156{
1157 int ret = 0;
1158 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1159 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1160 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1161
1162 if ((rxon1->flags == rxon2->flags) &&
1163 (rxon1->filter_flags == rxon2->filter_flags) &&
1164 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1165 (rxon1->ofdm_ht_single_stream_basic_rates ==
1166 rxon2->ofdm_ht_single_stream_basic_rates) &&
1167 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1168 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1169 (rxon1->rx_chain == rxon2->rx_chain) &&
1170 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1171 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1172 return 0;
1173 }
1174
1175 rxon_assoc.flags = ctx->staging.flags;
1176 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1177 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1178 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1179 rxon_assoc.reserved = 0;
1180 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1181 ctx->staging.ofdm_ht_single_stream_basic_rates;
1182 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1183 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1184 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1185
1186 ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1187 sizeof(rxon_assoc), &rxon_assoc, NULL);
1188
1189 return ret;
1190}
1191
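/*
 * Commit the staging RXON configuration to the device.  When only
 * association-related fields differ from the active configuration, a
 * lightweight RXON_ASSOC is sent; otherwise the association bit is
 * cleared (if needed), the full RXON is sent, uCode stations and default
 * WEP keys are restored, and TX power is re-sent.
 */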
1192static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1193{
1194 /* cast away the const for active_rxon in this function */
1195 struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
1196 int ret;
1197 bool new_assoc =
1198 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1199
1200 if (!iwl_legacy_is_alive(priv))
1201 return -EBUSY;
1202
1203 if (!ctx->is_active)
1204 return 0;
1205
1206 /* always get timestamp with Rx frame */
1207 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1208
1209 ret = iwl_legacy_check_rxon_cmd(priv, ctx);
1210 if (ret) {
1211 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1212 return -EINVAL;
1213 }
1214
1215 /*
1216 * receive commit_rxon request
1217	 * abort any previous channel switch if still in progress
1218 */
1219 if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
1220 (priv->switch_channel != ctx->staging.channel)) {
1221 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1222 le16_to_cpu(priv->switch_channel));
1223 iwl_legacy_chswitch_done(priv, false);
1224 }
1225
1226	/* If we don't need to send a full RXON, we can use the
1227	 * RXON_ASSOC command, which only reconfigures the filter
1228	 * and other flags for the current radio configuration. */
1229 if (!iwl_legacy_full_rxon_required(priv, ctx)) {
1230 ret = iwl_legacy_send_rxon_assoc(priv, ctx);
1231 if (ret) {
1232 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1233 return ret;
1234 }
1235
1236 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1237 iwl_legacy_print_rx_config_cmd(priv, ctx);
1238 /*
1239 * We do not commit tx power settings while channel changing,
1240 * do it now if tx power changed.
1241 */
1242 iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
1243 return 0;
1244 }
1245
1246 /* If we are currently associated and the new config requires
1247 * an RXON_ASSOC and the new config wants the associated mask enabled,
1248	 * we must clear the association bit from the active configuration
1249 * before we apply the new config */
1250 if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
1251 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1252 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1253
1254 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1255 sizeof(struct iwl_legacy_rxon_cmd),
1256 active_rxon);
1257
1258 /* If the mask clearing failed then we set
1259 * active_rxon back to what it was previously */
1260 if (ret) {
1261 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1262 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
1263 return ret;
1264 }
1265 iwl_legacy_clear_ucode_stations(priv, ctx);
1266 iwl_legacy_restore_stations(priv, ctx);
1267 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1268 if (ret) {
1269 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1270 return ret;
1271 }
1272 }
1273
1274 IWL_DEBUG_INFO(priv, "Sending RXON\n"
1275 "* with%s RXON_FILTER_ASSOC_MSK\n"
1276 "* channel = %d\n"
1277 "* bssid = %pM\n",
1278 (new_assoc ? "" : "out"),
1279 le16_to_cpu(ctx->staging.channel),
1280 ctx->staging.bssid_addr);
1281
1282 iwl_legacy_set_rxon_hwcrypto(priv, ctx,
1283 !priv->cfg->mod_params->sw_crypto);
1284
1285 /* Apply the new configuration
1286 * RXON unassoc clears the station table in uCode so restoration of
1287 * stations is needed after it (the RXON command) completes
1288 */
1289 if (!new_assoc) {
1290 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1291 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1292 if (ret) {
1293 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1294 return ret;
1295 }
1296 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
1297 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1298 iwl_legacy_clear_ucode_stations(priv, ctx);
1299 iwl_legacy_restore_stations(priv, ctx);
1300 ret = iwl4965_restore_default_wep_keys(priv, ctx);
1301 if (ret) {
1302 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1303 return ret;
1304 }
1305 }
1306 if (new_assoc) {
1307 priv->start_calib = 0;
1308 /* Apply the new configuration
1309 * RXON assoc doesn't clear the station table in uCode,
1310 */
1311 ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
1312 sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
1313 if (ret) {
1314 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1315 return ret;
1316 }
1317 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1318 }
1319 iwl_legacy_print_rx_config_cmd(priv, ctx);
1320
1321 iwl4965_init_sensitivity(priv);
1322
1323	/* If we issue a new RXON command which requires tuning, then we must
1324	 * send a new TXPOWER command or we won't be able to Tx any frames */
1325 ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1326 if (ret) {
1327 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1328 return ret;
1329 }
1330
1331 return 0;
1332}
1333
1334static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1335 struct ieee80211_channel_switch *ch_switch)
1336{
1337 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1338 int rc;
1339 u8 band = 0;
1340 bool is_ht40 = false;
1341 u8 ctrl_chan_high = 0;
1342 struct iwl4965_channel_switch_cmd cmd;
1343 const struct iwl_channel_info *ch_info;
1344 u32 switch_time_in_usec, ucode_switch_time;
1345 u16 ch;
1346 u32 tsf_low;
1347 u8 switch_count;
1348 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
1349 struct ieee80211_vif *vif = ctx->vif;
1350 band = priv->band == IEEE80211_BAND_2GHZ;
1351
1352 is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
1353
1354 if (is_ht40 &&
1355 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1356 ctrl_chan_high = 1;
1357
1358 cmd.band = band;
1359 cmd.expect_beacon = 0;
1360 ch = ch_switch->channel->hw_value;
1361 cmd.channel = cpu_to_le16(ch);
1362 cmd.rxon_flags = ctx->staging.flags;
1363 cmd.rxon_filter_flags = ctx->staging.filter_flags;
1364 switch_count = ch_switch->count;
1365 tsf_low = ch_switch->timestamp & 0x0ffffffff;
1366 /*
1367 * calculate the ucode channel switch time
1368	 * adding the TSF as one of the factors in deciding when to switch
1369 */
1370 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
1371 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
1372 beacon_interval)) {
1373 switch_count -= (priv->ucode_beacon_time -
1374 tsf_low) / beacon_interval;
1375 } else
1376 switch_count = 0;
1377 }
1378 if (switch_count <= 1)
1379 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1380 else {
1381 switch_time_in_usec =
1382 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1383 ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
1384 switch_time_in_usec,
1385 beacon_interval);
1386 cmd.switch_time = iwl_legacy_add_beacon_time(priv,
1387 priv->ucode_beacon_time,
1388 ucode_switch_time,
1389 beacon_interval);
1390 }
1391 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
1392 cmd.switch_time);
1393 ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
1394 if (ch_info)
1395 cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
1396 else {
1397 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1398 ctx->active.channel, ch);
1399 return -EFAULT;
1400 }
1401
1402 rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
1403 ctrl_chan_high, &cmd.tx_power);
1404 if (rc) {
1405 IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
1406 return rc;
1407 }
1408
1409 return iwl_legacy_send_cmd_pdu(priv,
1410 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1411}
1412
1413/**
1414 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1415 */
1416static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
1417 struct iwl_tx_queue *txq,
1418 u16 byte_cnt)
1419{
1420 struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
1421 int txq_id = txq->q.id;
1422 int write_ptr = txq->q.write_ptr;
1423 int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1424 __le16 bc_ent;
1425
1426 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1427
1428 bc_ent = cpu_to_le16(len & 0xFFF);
1429 /* Set up byte count within first 256 entries */
1430 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1431
1432 /* If within first 64 entries, duplicate at end */
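	/* (The extra TFD_QUEUE_SIZE_BC_DUP entries presumably let the
	 * scheduler read a contiguous window across the wrap point.) */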
1433 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1434 scd_bc_tbl[txq_id].
1435 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
1436}
1437
1438/**
1439 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1440 * @priv: driver private data; the temperature reading comes from uCode statistics
1441 *
1442 * A return of <0 indicates bogus data in the statistics
1443 */
1444static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1445{
1446 s32 temperature;
1447 s32 vt;
1448 s32 R1, R2, R3;
1449 u32 R4;
1450
1451 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1452 (priv->_4965.statistics.flag &
1453 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
1454 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
1455 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1456 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1457 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1458 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1459 } else {
1460 IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
1461 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1462 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1463 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1464 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
1465 }
1466
1467 /*
1468 * Temperature is only 23 bits, so sign extend out to 32.
1469 *
1470 * NOTE If we haven't received a statistics notification yet
1471 * with an updated temperature, use R4 provided to us in the
1472 * "initialize" ALIVE response.
1473 */
1474 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1475 vt = sign_extend32(R4, 23);
1476 else
1477 vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
1478 general.common.temperature), 23);
1479
1480 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1481
1482 if (R3 == R1) {
1483 IWL_ERR(priv, "Calibration conflict R1 == R3\n");
1484 return -1;
1485 }
1486
1487 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1488 * Add offset to center the adjustment around 0 degrees Centigrade. */
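	/* i.e. T[K] = 0.97 * TEMPERATURE_CALIB_A_VAL * (vt - R2) / (R3 - R1)
	 *             + TEMPERATURE_CALIB_KELVIN_OFFSET */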
1489 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1490 temperature /= (R3 - R1);
1491 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
1492
1493 IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
1494 temperature, KELVIN_TO_CELSIUS(temperature));
1495
1496 return temperature;
1497}
1498
1499/* Adjust Txpower only if temperature variance is greater than threshold. */
1500#define IWL_TEMPERATURE_THRESHOLD 3
1501
1502/**
1503 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
1504 *
1505 * If the temperature has changed sufficiently, then a recalibration
1506 * is needed.
1507 *
1508 * Assumes the caller will replace priv->last_temperature once the
1509 * calibration has been executed.
1510 */
1511static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
1512{
1513 int temp_diff;
1514
1515 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
1516 IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
1517 return 0;
1518 }
1519
1520 temp_diff = priv->temperature - priv->last_temperature;
1521
1522 /* get absolute value */
1523 if (temp_diff < 0) {
1524 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
1525 temp_diff = -temp_diff;
1526 } else if (temp_diff == 0)
1527 IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
1528 else
1529 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
1530
1531 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1532 IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
1533 return 0;
1534 }
1535
1536 IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
1537
1538 return 1;
1539}
1540
1541static void iwl4965_temperature_calib(struct iwl_priv *priv)
1542{
1543 s32 temp;
1544
1545 temp = iwl4965_hw_get_temperature(priv);
1546 if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
1547 return;
1548
1549 if (priv->temperature != temp) {
1550 if (priv->temperature)
1551 IWL_DEBUG_TEMP(priv, "Temperature changed "
1552 "from %dC to %dC\n",
1553 KELVIN_TO_CELSIUS(priv->temperature),
1554 KELVIN_TO_CELSIUS(temp));
1555 else
1556 IWL_DEBUG_TEMP(priv, "Temperature "
1557 "initialized to %dC\n",
1558 KELVIN_TO_CELSIUS(temp));
1559 }
1560
1561 priv->temperature = temp;
1562 set_bit(STATUS_TEMPERATURE, &priv->status);
1563
1564 if (!priv->disable_tx_power_cal &&
1565 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1566 iwl4965_is_temp_calib_needed(priv))
1567 queue_work(priv->workqueue, &priv->txpower_work);
1568}
1569
1570static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1571{
1572 switch (cmd_id) {
1573 case REPLY_RXON:
1574 return (u16) sizeof(struct iwl4965_rxon_cmd);
1575 default:
1576 return len;
1577 }
1578}
1579
1580static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
1581 u8 *data)
1582{
1583 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1584 addsta->mode = cmd->mode;
1585 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1586 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
1587 addsta->station_flags = cmd->station_flags;
1588 addsta->station_flags_msk = cmd->station_flags_msk;
1589 addsta->tid_disable_tx = cmd->tid_disable_tx;
1590 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1591 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1592 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1593 addsta->sleep_tx_count = cmd->sleep_tx_count;
1594 addsta->reserved1 = cpu_to_le16(0);
1595 addsta->reserved2 = cpu_to_le16(0);
1596
1597 return (u16)sizeof(struct iwl4965_addsta_cmd);
1598}
1599
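/* The scheduler sequence number (SSN) occupies the __le32 slot that
 * immediately follows the frame_count per-frame status entries in the
 * Tx response. */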
1600static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
1601{
1602 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
1603}
1604
1605/**
1606 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
1607 */
1608static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1609 struct iwl_ht_agg *agg,
1610 struct iwl4965_tx_resp *tx_resp,
1611 int txq_id, u16 start_idx)
1612{
1613 u16 status;
1614 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
1615 struct ieee80211_tx_info *info = NULL;
1616 struct ieee80211_hdr *hdr = NULL;
1617 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1618 int i, sh, idx;
1619 u16 seq;
1620 if (agg->wait_for_ba)
1621 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
1622
1623 agg->frame_count = tx_resp->frame_count;
1624 agg->start_idx = start_idx;
1625 agg->rate_n_flags = rate_n_flags;
1626 agg->bitmap = 0;
1627
1628 /* num frames attempted by Tx command */
1629 if (agg->frame_count == 1) {
1630 /* Only one frame was attempted; no block-ack will arrive */
1631 status = le16_to_cpu(frame_status[0].status);
1632 idx = start_idx;
1633
1634 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
1635 agg->frame_count, agg->start_idx, idx);
1636
1637 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
1638 info->status.rates[0].count = tx_resp->failure_frame + 1;
1639 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1640 info->flags |= iwl4965_tx_status_to_mac80211(status);
1641 iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
1642
1643 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
1644 status & 0xff, tx_resp->failure_frame);
1645 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
1646
1647 agg->wait_for_ba = 0;
1648 } else {
1649 /* Two or more frames were attempted; expect block-ack */
1650 u64 bitmap = 0;
1651 int start = agg->start_idx;
1652
1653 /* Construct bit-map of pending frames within Tx window */
1654 for (i = 0; i < agg->frame_count; i++) {
1655 u16 sc;
1656 status = le16_to_cpu(frame_status[i].status);
1657 seq = le16_to_cpu(frame_status[i].sequence);
1658 idx = SEQ_TO_INDEX(seq);
1659 txq_id = SEQ_TO_QUEUE(seq);
1660
1661 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1662 AGG_TX_STATE_ABORT_MSK))
1663 continue;
1664
1665 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
1666 agg->frame_count, txq_id, idx);
1667
1668 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
1669 if (!hdr) {
1670 IWL_ERR(priv,
1671 "BUG_ON idx doesn't point to valid skb"
1672 " idx=%d, txq_id=%d\n", idx, txq_id);
1673 return -1;
1674 }
1675
1676 sc = le16_to_cpu(hdr->seq_ctrl);
1677 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1678 IWL_ERR(priv,
1679 "BUG_ON idx doesn't match seq control"
1680 " idx=%d, seq_idx=%d, seq=%d\n",
1681 idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
1682 return -1;
1683 }
1684
1685 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
1686 i, idx, SEQ_TO_SN(sc));
1687
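			/* Record this attempted frame as a bit relative to
			 * 'start'; when an index precedes 'start' or the
			 * sequence space wraps, shift the bitmap and rebase
			 * 'start' so previously set bits keep their meaning. */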
1688 sh = idx - start;
1689 if (sh > 64) {
1690 sh = (start - idx) + 0xff;
1691 bitmap = bitmap << sh;
1692 sh = 0;
1693 start = idx;
1694 } else if (sh < -64)
1695 sh = 0xff - (start - idx);
1696 else if (sh < 0) {
1697 sh = start - idx;
1698 start = idx;
1699 bitmap = bitmap << sh;
1700 sh = 0;
1701 }
1702 bitmap |= 1ULL << sh;
1703 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
1704 start, (unsigned long long)bitmap);
1705 }
1706
1707 agg->bitmap = bitmap;
1708 agg->start_idx = start;
1709 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
1710 agg->frame_count, agg->start_idx,
1711 (unsigned long long)agg->bitmap);
1712
1713 if (bitmap)
1714 agg->wait_for_ba = 1;
1715 }
1716 return 0;
1717}
1718
1719static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
1720{
1721 int i;
1722 int start = 0;
1723 int ret = IWL_INVALID_STATION;
1724 unsigned long flags;
1725
1726	if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
1727 start = IWL_STA_ID;
1728
1729 if (is_broadcast_ether_addr(addr))
1730 return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
1731
1732 spin_lock_irqsave(&priv->sta_lock, flags);
1733 for (i = start; i < priv->hw_params.max_stations; i++)
1734 if (priv->stations[i].used &&
1735 (!compare_ether_addr(priv->stations[i].sta.sta.addr,
1736 addr))) {
1737 ret = i;
1738 goto out;
1739 }
1740
1741 IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
1742 addr, priv->num_stations);
1743
1744 out:
1745 /*
1746 * It may be possible that more commands interacting with stations
1747	 * arrive before we have completed processing the addition of the
1748	 * station.
1749 */
1750 if (ret != IWL_INVALID_STATION &&
1751 (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
1752 ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
1753 (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
1754 IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
1755 ret);
1756 ret = IWL_INVALID_STATION;
1757 }
1758 spin_unlock_irqrestore(&priv->sta_lock, flags);
1759 return ret;
1760}
1761
1762static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1763{
1764 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
1765 return IWL_AP_ID;
1766 } else {
1767 u8 *da = ieee80211_get_DA(hdr);
1768 return iwl4965_find_station(priv, da);
1769 }
1770}
1771
1772/**
1773 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
1774 */
1775static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
1776 struct iwl_rx_mem_buffer *rxb)
1777{
1778 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1779 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1780 int txq_id = SEQ_TO_QUEUE(sequence);
1781 int index = SEQ_TO_INDEX(sequence);
1782 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1783 struct ieee80211_hdr *hdr;
1784 struct ieee80211_tx_info *info;
1785 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1786 u32 status = le32_to_cpu(tx_resp->u.status);
1787 int uninitialized_var(tid);
1788 int sta_id;
1789 int freed;
1790 u8 *qc = NULL;
1791 unsigned long flags;
1792
1793 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
1794 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
1795 "is out of range [0-%d] %d %d\n", txq_id,
1796 index, txq->q.n_bd, txq->q.write_ptr,
1797 txq->q.read_ptr);
1798 return;
1799 }
1800
1801 txq->time_stamp = jiffies;
1802 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
1803 memset(&info->status, 0, sizeof(info->status));
1804
1805 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
1806 if (ieee80211_is_data_qos(hdr->frame_control)) {
1807 qc = ieee80211_get_qos_ctl(hdr);
1808 tid = qc[0] & 0xf;
1809 }
1810
1811 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
1812 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
1813 IWL_ERR(priv, "Station not known\n");
1814 return;
1815 }
1816
1817 spin_lock_irqsave(&priv->sta_lock, flags);
1818 if (txq->sched_retry) {
1819 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
1820 struct iwl_ht_agg *agg = NULL;
1821 WARN_ON(!qc);
1822
1823 agg = &priv->stations[sta_id].tid[tid].agg;
1824
1825 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
1826
1827 /* check if BAR is needed */
1828 if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
1829 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1830
1831 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1832 index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
1833 txq->q.n_bd);
1834 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
1835				"%d index %d\n", scd_ssn, index);
1836 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1837 if (qc)
1838 iwl4965_free_tfds_in_queue(priv, sta_id,
1839 tid, freed);
1840
1841 if (priv->mac80211_registered &&
1842 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
1843 && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1844 iwl_legacy_wake_queue(priv, txq);
1845 }
1846 } else {
1847 info->status.rates[0].count = tx_resp->failure_frame + 1;
1848 info->flags |= iwl4965_tx_status_to_mac80211(status);
1849 iwl4965_hwrate_to_tx_control(priv,
1850 le32_to_cpu(tx_resp->rate_n_flags),
1851 info);
1852
1853 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
1854 "rate_n_flags 0x%x retries %d\n",
1855 txq_id,
1856 iwl4965_get_tx_fail_reason(status), status,
1857 le32_to_cpu(tx_resp->rate_n_flags),
1858 tx_resp->failure_frame);
1859
1860 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1861 if (qc && likely(sta_id != IWL_INVALID_STATION))
1862 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
1863 else if (sta_id == IWL_INVALID_STATION)
1864 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
1865
1866 if (priv->mac80211_registered &&
1867 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
1868 iwl_legacy_wake_queue(priv, txq);
1869 }
1870 if (qc && likely(sta_id != IWL_INVALID_STATION))
1871 iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
1872
1873 iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
1874
1875 spin_unlock_irqrestore(&priv->sta_lock, flags);
1876}
1877
1878static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
1879 struct iwl_rx_mem_buffer *rxb)
1880{
1881 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1882 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
1883 u8 rate __maybe_unused =
1884 iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
1885
1886 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
1887 "tsf:0x%.8x%.8x rate:%d\n",
1888 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
1889 beacon->beacon_notify_hdr.failure_frame,
1890 le32_to_cpu(beacon->ibss_mgr_status),
1891 le32_to_cpu(beacon->high_tsf),
1892 le32_to_cpu(beacon->low_tsf), rate);
1893
1894 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
1895}
1896
1897/* Set up 4965-specific Rx frame reply handlers */
1898static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
1899{
1900 /* Legacy Rx frames */
1901 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
1902 /* Tx response */
1903 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
1904 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
1905}
1906
1907static struct iwl_hcmd_ops iwl4965_hcmd = {
1908 .rxon_assoc = iwl4965_send_rxon_assoc,
1909 .commit_rxon = iwl4965_commit_rxon,
1910 .set_rxon_chain = iwl4965_set_rxon_chain,
1911};
1912
1913static void iwl4965_post_scan(struct iwl_priv *priv)
1914{
1915 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1916
1917 /*
1918 * Since setting the RXON may have been deferred while
1919 * performing the scan, fire one off if needed
1920 */
1921 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1922 iwl_legacy_commit_rxon(priv, ctx);
1923}
1924
1925static void iwl4965_post_associate(struct iwl_priv *priv)
1926{
1927 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1928 struct ieee80211_vif *vif = ctx->vif;
1929 struct ieee80211_conf *conf = NULL;
1930 int ret = 0;
1931
1932 if (!vif || !priv->is_open)
1933 return;
1934
1935 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1936 return;
1937
1938 iwl_legacy_scan_cancel_timeout(priv, 200);
1939
1940 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
1941
1942 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1943 iwl_legacy_commit_rxon(priv, ctx);
1944
1945 ret = iwl_legacy_send_rxon_timing(priv, ctx);
1946 if (ret)
1947 IWL_WARN(priv, "RXON timing - "
1948 "Attempting to continue.\n");
1949
1950 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
1951
1952 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
1953
1954 if (priv->cfg->ops->hcmd->set_rxon_chain)
1955 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1956
1957 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
1958
1959 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
1960 vif->bss_conf.aid, vif->bss_conf.beacon_int);
1961
1962 if (vif->bss_conf.use_short_preamble)
1963 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1964 else
1965 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1966
1967 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
1968 if (vif->bss_conf.use_short_slot)
1969 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
1970 else
1971 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1972 }
1973
1974 iwl_legacy_commit_rxon(priv, ctx);
1975
1976 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
1977 vif->bss_conf.aid, ctx->active.bssid_addr);
1978
1979 switch (vif->type) {
1980 case NL80211_IFTYPE_STATION:
1981 break;
1982 case NL80211_IFTYPE_ADHOC:
1983 iwl4965_send_beacon_cmd(priv);
1984 break;
1985 default:
1986 IWL_ERR(priv, "%s Should not be called in %d mode\n",
1987 __func__, vif->type);
1988 break;
1989 }
1990
1991	/* the chain noise calibration will enable PM upon completion.
1992 * If chain noise has already been run, then we need to enable
1993 * power management here */
1994 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
1995 iwl_legacy_power_update_mode(priv, false);
1996
1997 /* Enable Rx differential gain and sensitivity calibrations */
1998 iwl4965_chain_noise_reset(priv);
1999 priv->start_calib = 1;
2000}
2001
2002static void iwl4965_config_ap(struct iwl_priv *priv)
2003{
2004 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2005 struct ieee80211_vif *vif = ctx->vif;
2006 int ret = 0;
2007
2008 lockdep_assert_held(&priv->mutex);
2009
2010 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2011 return;
2012
2013 /* The following should be done only at AP bring up */
2014 if (!iwl_legacy_is_associated_ctx(ctx)) {
2015
2016 /* RXON - unassoc (to set timing command) */
2017 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2018 iwl_legacy_commit_rxon(priv, ctx);
2019
2020 /* RXON Timing */
2021 ret = iwl_legacy_send_rxon_timing(priv, ctx);
2022 if (ret)
2023 IWL_WARN(priv, "RXON timing failed - "
2024 "Attempting to continue.\n");
2025
2026 /* AP has all antennas */
2027 priv->chain_noise_data.active_chains =
2028 priv->hw_params.valid_rx_ant;
2029 iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
2030 if (priv->cfg->ops->hcmd->set_rxon_chain)
2031 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2032
2033 ctx->staging.assoc_id = 0;
2034
2035 if (vif->bss_conf.use_short_preamble)
2036 ctx->staging.flags |=
2037 RXON_FLG_SHORT_PREAMBLE_MSK;
2038 else
2039 ctx->staging.flags &=
2040 ~RXON_FLG_SHORT_PREAMBLE_MSK;
2041
2042 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2043 if (vif->bss_conf.use_short_slot)
2044 ctx->staging.flags |=
2045 RXON_FLG_SHORT_SLOT_MSK;
2046 else
2047 ctx->staging.flags &=
2048 ~RXON_FLG_SHORT_SLOT_MSK;
2049 }
2050 /* need to send beacon cmd before committing assoc RXON! */
2051 iwl4965_send_beacon_cmd(priv);
2052 /* restore RXON assoc */
2053 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2054 iwl_legacy_commit_rxon(priv, ctx);
2055 }
2056 iwl4965_send_beacon_cmd(priv);
2057}
2058
2059static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2060 .get_hcmd_size = iwl4965_get_hcmd_size,
2061 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2062 .request_scan = iwl4965_request_scan,
2063 .post_scan = iwl4965_post_scan,
2064};
2065
2066static struct iwl_lib_ops iwl4965_lib = {
2067 .set_hw_params = iwl4965_hw_set_hw_params,
2068 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
2069 .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
2070 .txq_free_tfd = iwl4965_hw_txq_free_tfd,
2071 .txq_init = iwl4965_hw_tx_queue_init,
2072 .rx_handler_setup = iwl4965_rx_handler_setup,
2073 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
2074 .init_alive_start = iwl4965_init_alive_start,
2075 .load_ucode = iwl4965_load_bsm,
2076 .dump_nic_error_log = iwl4965_dump_nic_error_log,
2077 .dump_fh = iwl4965_dump_fh,
2078 .set_channel_switch = iwl4965_hw_channel_switch,
2079 .apm_ops = {
2080 .init = iwl_legacy_apm_init,
2081 .config = iwl4965_nic_config,
2082 },
2083 .eeprom_ops = {
2084 .regulatory_bands = {
2085 EEPROM_REGULATORY_BAND_1_CHANNELS,
2086 EEPROM_REGULATORY_BAND_2_CHANNELS,
2087 EEPROM_REGULATORY_BAND_3_CHANNELS,
2088 EEPROM_REGULATORY_BAND_4_CHANNELS,
2089 EEPROM_REGULATORY_BAND_5_CHANNELS,
2090 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
2091 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
2092 },
2093 .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
2094 .release_semaphore = iwl4965_eeprom_release_semaphore,
2095 },
2096 .send_tx_power = iwl4965_send_tx_power,
2097 .update_chain_flags = iwl4965_update_chain_flags,
2098 .temp_ops = {
2099 .temperature = iwl4965_temperature_calib,
2100 },
2101 .debugfs_ops = {
2102 .rx_stats_read = iwl4965_ucode_rx_stats_read,
2103 .tx_stats_read = iwl4965_ucode_tx_stats_read,
2104 .general_stats_read = iwl4965_ucode_general_stats_read,
2105 },
2106};
2107
2108static const struct iwl_legacy_ops iwl4965_legacy_ops = {
2109 .post_associate = iwl4965_post_associate,
2110 .config_ap = iwl4965_config_ap,
2111 .manage_ibss_station = iwl4965_manage_ibss_station,
2112 .update_bcast_stations = iwl4965_update_bcast_stations,
2113};
2114
2115struct ieee80211_ops iwl4965_hw_ops = {
2116 .tx = iwl4965_mac_tx,
2117 .start = iwl4965_mac_start,
2118 .stop = iwl4965_mac_stop,
2119 .add_interface = iwl_legacy_mac_add_interface,
2120 .remove_interface = iwl_legacy_mac_remove_interface,
2121 .change_interface = iwl_legacy_mac_change_interface,
2122 .config = iwl_legacy_mac_config,
2123 .configure_filter = iwl4965_configure_filter,
2124 .set_key = iwl4965_mac_set_key,
2125 .update_tkip_key = iwl4965_mac_update_tkip_key,
2126 .conf_tx = iwl_legacy_mac_conf_tx,
2127 .reset_tsf = iwl_legacy_mac_reset_tsf,
2128 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
2129 .ampdu_action = iwl4965_mac_ampdu_action,
2130 .hw_scan = iwl_legacy_mac_hw_scan,
2131 .sta_add = iwl4965_mac_sta_add,
2132 .sta_remove = iwl_legacy_mac_sta_remove,
2133 .channel_switch = iwl4965_mac_channel_switch,
2134 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
2135};
2136
2137static const struct iwl_ops iwl4965_ops = {
2138 .lib = &iwl4965_lib,
2139 .hcmd = &iwl4965_hcmd,
2140 .utils = &iwl4965_hcmd_utils,
2141 .led = &iwl4965_led_ops,
2142 .legacy = &iwl4965_legacy_ops,
2143 .ieee80211_ops = &iwl4965_hw_ops,
2144};
2145
2146static struct iwl_base_params iwl4965_base_params = {
2147 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
2148 .num_of_queues = IWL49_NUM_QUEUES,
2149 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
2150 .pll_cfg_val = 0,
2151 .set_l0s = true,
2152 .use_bsm = true,
2153 .led_compensation = 61,
2154 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2155 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2156 .temperature_kelvin = true,
2157 .ucode_tracing = true,
2158 .sensitivity_calib_by_driver = true,
2159 .chain_noise_calib_by_driver = true,
2160};
2161
2162struct iwl_cfg iwl4965_cfg = {
2163 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2164 .fw_name_pre = IWL4965_FW_PRE,
2165 .ucode_api_max = IWL4965_UCODE_API_MAX,
2166 .ucode_api_min = IWL4965_UCODE_API_MIN,
2167 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2168 .valid_tx_ant = ANT_AB,
2169 .valid_rx_ant = ANT_ABC,
2170 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2171 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2172 .ops = &iwl4965_ops,
2173 .mod_params = &iwl4965_mod_params,
2174 .base_params = &iwl4965_base_params,
2175 .led_mode = IWL_LED_BLINK,
2176 /*
2177 * Force use of chains B and C for scan RX on 5 GHz band
2178 * because the device has off-channel reception on chain A.
2179 */
2180 .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
2181};
2182
2183/* Module firmware */
2184MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
new file mode 100644
index 00000000000..01f8163daf1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.h
@@ -0,0 +1,282 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_4965_h__
64#define __iwl_4965_h__
65
66#include "iwl-dev.h"
67
68/* configuration for the _4965 devices */
69extern struct iwl_cfg iwl4965_cfg;
70
71extern struct iwl_mod_params iwl4965_mod_params;
72
73extern struct ieee80211_ops iwl4965_hw_ops;
74
75/* tx queue */
76void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
77 int sta_id, int tid, int freed);
78
79/* RXON */
80void iwl4965_set_rxon_chain(struct iwl_priv *priv,
81 struct iwl_rxon_context *ctx);
82
83/* uCode */
84int iwl4965_verify_ucode(struct iwl_priv *priv);
85
86/* lib */
87void iwl4965_check_abort_status(struct iwl_priv *priv,
88 u8 frame_count, u32 status);
89
90void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
91int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
92int iwl4965_hw_nic_init(struct iwl_priv *priv);
93int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
94
95/* rx */
96void iwl4965_rx_queue_restock(struct iwl_priv *priv);
97void iwl4965_rx_replenish(struct iwl_priv *priv);
98void iwl4965_rx_replenish_now(struct iwl_priv *priv);
99void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
100int iwl4965_rxq_stop(struct iwl_priv *priv);
101int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
102void iwl4965_rx_reply_rx(struct iwl_priv *priv,
103 struct iwl_rx_mem_buffer *rxb);
104void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
105 struct iwl_rx_mem_buffer *rxb);
106void iwl4965_rx_handle(struct iwl_priv *priv);
107
108/* tx */
109void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
110int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq,
112 dma_addr_t addr, u16 len, u8 reset, u8 pad);
113int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
114 struct iwl_tx_queue *txq);
115void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
116 struct ieee80211_tx_info *info);
117int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
118int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
119 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
120int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
121 struct ieee80211_sta *sta, u16 tid);
122int iwl4965_txq_check_empty(struct iwl_priv *priv,
123 int sta_id, u8 tid, int txq_id);
124void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
125 struct iwl_rx_mem_buffer *rxb);
126int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
127void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
128int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
129void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
130void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
131void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
132
133/*
134 * Acquire priv->lock before calling this function !
135 */
136void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
137/**
138 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
139 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
140 * @scd_retry: (1) Indicates queue will be used in aggregation mode
141 *
142 * NOTE: Acquire priv->lock before calling this function !
143 */
144void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
145 struct iwl_tx_queue *txq,
146 int tx_fifo_id, int scd_retry);
147
148static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
149{
150 status &= TX_STATUS_MSK;
151
152 switch (status) {
153 case TX_STATUS_SUCCESS:
154 case TX_STATUS_DIRECT_DONE:
155 return IEEE80211_TX_STAT_ACK;
156 case TX_STATUS_FAIL_DEST_PS:
157 return IEEE80211_TX_STAT_TX_FILTERED;
158 default:
159 return 0;
160 }
161}
162
163static inline bool iwl4965_is_tx_success(u32 status)
164{
165 status &= TX_STATUS_MSK;
166 return (status == TX_STATUS_SUCCESS) ||
167 (status == TX_STATUS_DIRECT_DONE);
168}
169
170u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
171
172/* rx */
173void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
174 struct iwl_rx_mem_buffer *rxb);
175bool iwl4965_good_plcp_health(struct iwl_priv *priv,
176 struct iwl_rx_packet *pkt);
177void iwl4965_rx_statistics(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb);
179void iwl4965_reply_statistics(struct iwl_priv *priv,
180 struct iwl_rx_mem_buffer *rxb);
181
182/* scan */
183int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
184
185/* station mgmt */
186int iwl4965_manage_ibss_station(struct iwl_priv *priv,
187 struct ieee80211_vif *vif, bool add);
188
189/* hcmd */
190int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
191
192#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
193const char *iwl4965_get_tx_fail_reason(u32 status);
194#else
195static inline const char *
196iwl4965_get_tx_fail_reason(u32 status) { return ""; }
197#endif
198
199/* station management */
200int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
201 struct iwl_rxon_context *ctx);
202int iwl4965_add_bssid_station(struct iwl_priv *priv,
203 struct iwl_rxon_context *ctx,
204 const u8 *addr, u8 *sta_id_r);
205int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
206 struct iwl_rxon_context *ctx,
207 struct ieee80211_key_conf *key);
208int iwl4965_set_default_wep_key(struct iwl_priv *priv,
209 struct iwl_rxon_context *ctx,
210 struct ieee80211_key_conf *key);
211int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
212 struct iwl_rxon_context *ctx);
213int iwl4965_set_dynamic_key(struct iwl_priv *priv,
214 struct iwl_rxon_context *ctx,
215 struct ieee80211_key_conf *key, u8 sta_id);
216int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
217 struct iwl_rxon_context *ctx,
218 struct ieee80211_key_conf *key, u8 sta_id);
219void iwl4965_update_tkip_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf,
222 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
223int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
224 int sta_id, int tid);
225int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
226 int tid, u16 ssn);
227int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
228 int tid);
229void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
230 int sta_id, int cnt);
231int iwl4965_update_bcast_stations(struct iwl_priv *priv);
232
233/* rate */
234static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
235{
236 return BIT(ant_idx) << RATE_MCS_ANT_POS;
237}
238
239static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
240{
241 return le32_to_cpu(rate_n_flags) & 0xFF;
242}
243
244static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
245{
246 return cpu_to_le32(flags|(u32)rate);
247}
248
249/* eeprom */
250void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
251int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
252void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
253int iwl4965_eeprom_check_version(struct iwl_priv *priv);
254
255/* mac80211 handlers (for 4965) */
256void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
257int iwl4965_mac_start(struct ieee80211_hw *hw);
258void iwl4965_mac_stop(struct ieee80211_hw *hw);
259void iwl4965_configure_filter(struct ieee80211_hw *hw,
260 unsigned int changed_flags,
261 unsigned int *total_flags,
262 u64 multicast);
263int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
264 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
265 struct ieee80211_key_conf *key);
266void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
267 struct ieee80211_vif *vif,
268 struct ieee80211_key_conf *keyconf,
269 struct ieee80211_sta *sta,
270 u32 iv32, u16 *phase1key);
271int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
272 struct ieee80211_vif *vif,
273 enum ieee80211_ampdu_mlme_action action,
274 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
275 u8 buf_size);
276int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
277 struct ieee80211_vif *vif,
278 struct ieee80211_sta *sta);
279void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
280 struct ieee80211_channel_switch *ch_switch);
281
282#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
new file mode 100644
index 00000000000..89904054473
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-commands.h
@@ -0,0 +1,3398 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_legacy_commands_h__
70#define __iwl_legacy_commands_h__
71
72struct iwl_priv;
73
74/* uCode version contains 4 values: Major/Minor/API/Serial */
75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
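
/*
 * Editor's illustrative sketch, not part of the original header: the four
 * version fields can be unpacked from a raw uCode version word with the
 * macros above.  The helper name is hypothetical and only demonstrates
 * intended usage.
 */
static inline void iwl_example_print_ucode_ver(u32 ver)
{
	pr_info("uCode %u.%u, API %u, serial %u\n",
		IWL_UCODE_MAJOR(ver), IWL_UCODE_MINOR(ver),
		IWL_UCODE_API(ver), IWL_UCODE_SERIAL(ver));
}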
79
80
81/* Tx rates */
82#define IWL_CCK_RATES 4
83#define IWL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
85
86enum {
87 REPLY_ALIVE = 0x1,
88 REPLY_ERROR = 0x2,
89
90 /* RXON and QOS commands */
91 REPLY_RXON = 0x10,
92 REPLY_RXON_ASSOC = 0x11,
93 REPLY_QOS_PARAM = 0x13,
94 REPLY_RXON_TIMING = 0x14,
95
96 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19,
99
100 /* Security */
101 REPLY_WEPKEY = 0x20,
102
103 /* RX, TX, LEDs */
104 REPLY_3945_RX = 0x1b, /* 3945 only */
105 REPLY_TX = 0x1c,
106 REPLY_RATE_SCALE = 0x47, /* 3945 only */
107 REPLY_LEDS_CMD = 0x48,
108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
109
110 /* 802.11h related */
111 REPLY_CHANNEL_SWITCH = 0x72,
112 CHANNEL_SWITCH_NOTIFICATION = 0x73,
113 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
114 SPECTRUM_MEASURE_NOTIFICATION = 0x75,
115
116 /* Power Management */
117 POWER_TABLE_CMD = 0x77,
118 PM_SLEEP_NOTIFICATION = 0x7A,
119 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
120
121 /* Scan commands and notifications */
122 REPLY_SCAN_CMD = 0x80,
123 REPLY_SCAN_ABORT_CMD = 0x81,
124 SCAN_START_NOTIFICATION = 0x82,
125 SCAN_RESULTS_NOTIFICATION = 0x83,
126 SCAN_COMPLETE_NOTIFICATION = 0x84,
127
128 /* IBSS/AP commands */
129 BEACON_NOTIFICATION = 0x90,
130 REPLY_TX_BEACON = 0x91,
131
132 /* Miscellaneous commands */
133 REPLY_TX_PWR_TABLE_CMD = 0x97,
134
135 /* Bluetooth device coexistence config command */
136 REPLY_BT_CONFIG = 0x9b,
137
138 /* Statistics */
139 REPLY_STATISTICS_CMD = 0x9c,
140 STATISTICS_NOTIFICATION = 0x9d,
141
142 /* RF-KILL commands and notifications */
143 CARD_STATE_NOTIFICATION = 0xa1,
144
145 /* Missed beacons notification */
146 MISSED_BEACONS_NOTIFICATION = 0xa2,
147
148 REPLY_CT_KILL_CONFIG_CMD = 0xa4,
149 SENSITIVITY_CMD = 0xa8,
150 REPLY_PHY_CALIBRATION_CMD = 0xb0,
151 REPLY_RX_PHY_CMD = 0xc0,
152 REPLY_RX_MPDU_CMD = 0xc1,
153 REPLY_RX = 0xc3,
154 REPLY_COMPRESSED_BA = 0xc5,
155
156 REPLY_MAX = 0xff
157};
158
159/******************************************************************************
160 * (0)
161 * Commonly used structures and definitions:
162 * Command header, rate_n_flags, txpower
163 *
164 *****************************************************************************/
165
166/* iwl_cmd_header flags value */
167#define IWL_CMD_FAILED_MSK 0x40
168
169#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
170#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
171#define SEQ_TO_INDEX(s) ((s) & 0xff)
172#define INDEX_TO_SEQ(i) ((i) & 0xff)
173#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
174#define SEQ_RX_FRAME cpu_to_le16(0x8000)
175
176/**
177 * struct iwl_cmd_header
178 *
179 * This header format appears in the beginning of each command sent from the
180 * driver, and each response/notification received from uCode.
181 */
182struct iwl_cmd_header {
183 u8 cmd; /* Command ID: REPLY_RXON, etc. */
184 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
185 /*
186 * The driver sets up the sequence number to values of its choosing.
187 * uCode does not use this value, but passes it back to the driver
188 * when sending the response to each driver-originated command, so
189 * the driver can match the response to the command. Since the values
190 * don't get used by uCode, the driver may set up an arbitrary format.
191 *
192 * There is one exception: uCode sets bit 15 when it originates
193 * the response/notification, i.e. when the response/notification
194 * is not a direct response to a command sent by the driver. For
195 * example, uCode issues REPLY_3945_RX when it sends a received frame
196 * to the driver; it is not a direct response to any driver command.
197 *
198 * The Linux driver uses the following format:
199 *
200 * 0:7 tfd index - position within TX queue
201 * 8:12 TX queue id
202 * 13 reserved
203 * 14 huge - driver sets this to indicate command is in the
204 * 'huge' storage at the end of the command buffers
205 * 15 unsolicited RX or uCode-originated notification
206 */
207 __le16 sequence;
208
209 /* command or response/notification data follows immediately */
210 u8 data[0];
211} __packed;
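
/*
 * Editor's illustrative sketch, not part of the original header: how the
 * driver-defined sequence format described above is typically composed on
 * the command path and decomposed again on the response path, using the
 * SEQ_*/QUEUE_TO_SEQ/INDEX_TO_SEQ helpers.  Function names are hypothetical.
 */
static inline __le16 iwl_example_build_seq(int txq_id, int idx, bool huge)
{
	__le16 seq = cpu_to_le16(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(idx));

	if (huge)
		seq |= SEQ_HUGE_FRAME;
	return seq;
}

static inline void iwl_example_parse_seq(__le16 sequence, int *txq_id, int *idx)
{
	u16 seq = le16_to_cpu(sequence);

	*txq_id = SEQ_TO_QUEUE(seq);
	*idx = SEQ_TO_INDEX(seq);
}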
212
213
214/**
215 * struct iwl3945_tx_power
216 *
217 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
218 *
219 * Each entry contains two values:
220 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
221 * linear value that multiplies the output of the digital signal processor,
222 * before being sent to the analog radio.
223 * 2) Radio gain. This sets the analog gain of the radio Tx path.
224 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
225 *
226 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
227 */
228struct iwl3945_tx_power {
229 u8 tx_gain; /* gain for analog radio */
230 u8 dsp_atten; /* gain for DSP */
231} __packed;
232
233/**
234 * struct iwl3945_power_per_rate
235 *
236 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
237 */
238struct iwl3945_power_per_rate {
239 u8 rate; /* plcp */
240 struct iwl3945_tx_power tpc;
241 u8 reserved;
242} __packed;
243
244/**
245 * iwl4965 rate_n_flags bit fields
246 *
247 * rate_n_flags format is used in following iwl4965 commands:
248 * REPLY_RX (response only)
249 * REPLY_RX_MPDU (response only)
250 * REPLY_TX (both command and response)
251 * REPLY_TX_LINK_QUALITY_CMD
252 *
253 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
254 * 2-0: 0) 6 Mbps
255 * 1) 12 Mbps
256 * 2) 18 Mbps
257 * 3) 24 Mbps
258 * 4) 36 Mbps
259 * 5) 48 Mbps
260 * 6) 54 Mbps
261 * 7) 60 Mbps
262 *
263 * 4-3: 0) Single stream (SISO)
264 * 1) Dual stream (MIMO)
265 * 2) Triple stream (MIMO)
266 *
267 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
268 *
269 * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
270 * 3-0: 0xD) 6 Mbps
271 * 0xF) 9 Mbps
272 * 0x5) 12 Mbps
273 * 0x7) 18 Mbps
274 * 0x9) 24 Mbps
275 * 0xB) 36 Mbps
276 * 0x1) 48 Mbps
277 * 0x3) 54 Mbps
278 *
279 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
280 * 6-0: 10) 1 Mbps
281 * 20) 2 Mbps
282 * 55) 5.5 Mbps
283 * 110) 11 Mbps
284 */
285#define RATE_MCS_CODE_MSK 0x7
286#define RATE_MCS_SPATIAL_POS 3
287#define RATE_MCS_SPATIAL_MSK 0x18
288#define RATE_MCS_HT_DUP_POS 5
289#define RATE_MCS_HT_DUP_MSK 0x20
290
291/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
292#define RATE_MCS_FLAGS_POS 8
293#define RATE_MCS_HT_POS 8
294#define RATE_MCS_HT_MSK 0x100
295
296/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
297#define RATE_MCS_CCK_POS 9
298#define RATE_MCS_CCK_MSK 0x200
299
300/* Bit 10: (1) Use Green Field preamble */
301#define RATE_MCS_GF_POS 10
302#define RATE_MCS_GF_MSK 0x400
303
304/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
305#define RATE_MCS_HT40_POS 11
306#define RATE_MCS_HT40_MSK 0x800
307
308/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
309#define RATE_MCS_DUP_POS 12
310#define RATE_MCS_DUP_MSK 0x1000
311
312/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
313#define RATE_MCS_SGI_POS 13
314#define RATE_MCS_SGI_MSK 0x2000
315
316/**
317 * rate_n_flags Tx antenna masks
318 * 4965 has 2 transmitters
 319 * bits 14:16
320 */
321#define RATE_MCS_ANT_POS 14
322#define RATE_MCS_ANT_A_MSK 0x04000
323#define RATE_MCS_ANT_B_MSK 0x08000
324#define RATE_MCS_ANT_C_MSK 0x10000
325#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
326#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
327#define RATE_ANT_NUM 3
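
/*
 * Editor's illustrative sketch, not part of the original header: classifying
 * a host-order rate_n_flags value with the RATE_MCS_* masks above and
 * extracting the antenna selection.  The helper names are hypothetical.
 */
static inline bool iwl_example_rate_is_ht(u32 rate_n_flags)
{
	return rate_n_flags & RATE_MCS_HT_MSK;
}

static inline bool iwl_example_rate_is_cck(u32 rate_n_flags)
{
	/* the CCK bit is only meaningful for legacy (non-HT) rates */
	return !(rate_n_flags & RATE_MCS_HT_MSK) &&
	       (rate_n_flags & RATE_MCS_CCK_MSK);
}

static inline u8 iwl_example_rate_ant_mask(u32 rate_n_flags)
{
	/* 3-bit A/B/C mask, e.g. 0x3 for antennas A and B */
	return (rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
}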
328
329#define POWER_TABLE_NUM_ENTRIES 33
330#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
331#define POWER_TABLE_CCK_ENTRY 32
332
333#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
334#define IWL_PWR_CCK_ENTRIES 2
335
336/**
337 * union iwl4965_tx_power_dual_stream
338 *
339 * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
340 * Use __le32 version (struct tx_power_dual_stream) when building command.
341 *
342 * Driver provides radio gain and DSP attenuation settings to device in pairs,
343 * one value for each transmitter chain. The first value is for transmitter A,
344 * second for transmitter B.
345 *
346 * For SISO bit rates, both values in a pair should be identical.
347 * For MIMO rates, one value may be different from the other,
348 * in order to balance the Tx output between the two transmitters.
349 *
350 * See more details in doc for TXPOWER in iwl-4965-hw.h.
351 */
352union iwl4965_tx_power_dual_stream {
353 struct {
354 u8 radio_tx_gain[2];
355 u8 dsp_predis_atten[2];
356 } s;
357 u32 dw;
358};
359
360/**
361 * struct tx_power_dual_stream
362 *
363 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
364 *
 365 * Same format as union iwl4965_tx_power_dual_stream, but packed as __le32
366 */
367struct tx_power_dual_stream {
368 __le32 dw;
369} __packed;
370
371/**
372 * struct iwl4965_tx_power_db
373 *
374 * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
375 */
376struct iwl4965_tx_power_db {
377 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
378} __packed;
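
/*
 * Editor's illustrative sketch, not part of the original header: building one
 * packed table entry from per-chain gain values.  As described above, for
 * SISO rates both chains would normally get identical settings.  The helper
 * name and parameters are hypothetical.
 */
static inline struct tx_power_dual_stream
iwl_example_pack_tx_power(u8 gain_a, u8 gain_b, u8 dsp_a, u8 dsp_b)
{
	union iwl4965_tx_power_dual_stream host;
	struct tx_power_dual_stream entry;

	host.s.radio_tx_gain[0] = gain_a;	/* transmitter A */
	host.s.radio_tx_gain[1] = gain_b;	/* transmitter B */
	host.s.dsp_predis_atten[0] = dsp_a;
	host.s.dsp_predis_atten[1] = dsp_b;
	entry.dw = cpu_to_le32(host.dw);
	return entry;
}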
379
380/******************************************************************************
381 * (0a)
382 * Alive and Error Commands & Responses:
383 *
384 *****************************************************************************/
385
386#define UCODE_VALID_OK cpu_to_le32(0x1)
387#define INITIALIZE_SUBTYPE (9)
388
389/*
390 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
391 *
392 * uCode issues this "initialize alive" notification once the initialization
393 * uCode image has completed its work, and is ready to load the runtime image.
394 * This is the *first* "alive" notification that the driver will receive after
395 * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
396 *
397 * See comments documenting "BSM" (bootstrap state machine).
398 *
399 * For 4965, this notification contains important calibration data for
400 * calculating txpower settings:
401 *
402 * 1) Power supply voltage indication. The voltage sensor outputs higher
403 * values for lower voltage, and vice verse.
404 *
405 * 2) Temperature measurement parameters, for each of two channel widths
406 * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
407 * is done via one of the receiver chains, and channel width influences
408 * the results.
409 *
410 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
411 * for each of 5 frequency ranges.
412 */
413struct iwl_init_alive_resp {
414 u8 ucode_minor;
415 u8 ucode_major;
416 __le16 reserved1;
417 u8 sw_rev[8];
418 u8 ver_type;
419 u8 ver_subtype; /* "9" for initialize alive */
420 __le16 reserved2;
421 __le32 log_event_table_ptr;
422 __le32 error_event_table_ptr;
423 __le32 timestamp;
424 __le32 is_valid;
425
426 /* calibration values from "initialize" uCode */
427 __le32 voltage; /* signed, higher value is lower voltage */
428 __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */
429 __le32 therm_r2[2]; /* signed */
430 __le32 therm_r3[2]; /* signed */
431 __le32 therm_r4[2]; /* signed */
432 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
433 * 2 Tx chains */
434} __packed;
435
436
437/**
438 * REPLY_ALIVE = 0x1 (response only, not a command)
439 *
440 * uCode issues this "alive" notification once the runtime image is ready
441 * to receive commands from the driver. This is the *second* "alive"
442 * notification that the driver will receive after rebooting uCode;
443 * this "alive" is indicated by subtype field != 9.
444 *
445 * See comments documenting "BSM" (bootstrap state machine).
446 *
447 * This response includes two pointers to structures within the device's
448 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
449 *
450 * 1) log_event_table_ptr indicates base of the event log. This traces
451 * a 256-entry history of uCode execution within a circular buffer.
452 * Its header format is:
453 *
454 * __le32 log_size; log capacity (in number of entries)
455 * __le32 type; (1) timestamp with each entry, (0) no timestamp
456 * __le32 wraps; # times uCode has wrapped to top of circular buffer
457 * __le32 write_index; next circular buffer entry that uCode would fill
458 *
459 * The header is followed by the circular buffer of log entries. Entries
460 * with timestamps have the following format:
461 *
462 * __le32 event_id; range 0 - 1500
463 * __le32 timestamp; low 32 bits of TSF (of network, if associated)
464 * __le32 data; event_id-specific data value
465 *
466 * Entries without timestamps contain only event_id and data.
467 *
468 *
469 * 2) error_event_table_ptr indicates base of the error log. This contains
470 * information about any uCode error that occurs. For 4965, the format
471 * of the error log is:
472 *
473 * __le32 valid; (nonzero) valid, (0) log is empty
474 * __le32 error_id; type of error
475 * __le32 pc; program counter
476 * __le32 blink1; branch link
477 * __le32 blink2; branch link
478 * __le32 ilink1; interrupt link
479 * __le32 ilink2; interrupt link
480 * __le32 data1; error-specific data
481 * __le32 data2; error-specific data
482 * __le32 line; source code line of error
483 * __le32 bcon_time; beacon timer
484 * __le32 tsf_low; network timestamp function timer
485 * __le32 tsf_hi; network timestamp function timer
486 * __le32 gp1; GP1 timer register
487 * __le32 gp2; GP2 timer register
488 * __le32 gp3; GP3 timer register
489 * __le32 ucode_ver; uCode version
490 * __le32 hw_ver; HW Silicon version
491 * __le32 brd_ver; HW board version
492 * __le32 log_pc; log program counter
493 * __le32 frame_ptr; frame pointer
494 * __le32 stack_ptr; stack pointer
495 * __le32 hcmd; last host command
496 * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
497 * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
498 * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
499 * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
500 * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
501 * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
502 * __le32 wait_event; wait event() caller address
503 * __le32 l2p_control; L2pControlField
504 * __le32 l2p_duration; L2pDurationField
505 * __le32 l2p_mhvalid; L2pMhValidBits
506 * __le32 l2p_addr_match; L2pAddrMatchStat
 507 * __le32 lmpm_pmg_sel;	indicates which clocks are turned on (LMPM_PMG_SEL)
 508 * __le32 u_timestamp;	indicates the date and time of the compilation
509 * __le32 reserved;
510 *
511 * The Linux driver can print both logs to the system log when a uCode error
512 * occurs.
513 */
514struct iwl_alive_resp {
515 u8 ucode_minor;
516 u8 ucode_major;
517 __le16 reserved1;
518 u8 sw_rev[8];
519 u8 ver_type;
520 u8 ver_subtype; /* not "9" for runtime alive */
521 __le16 reserved2;
522 __le32 log_event_table_ptr; /* SRAM address for event log */
523 __le32 error_event_table_ptr; /* SRAM address for error log */
524 __le32 timestamp;
525 __le32 is_valid;
526} __packed;
527
528/*
529 * REPLY_ERROR = 0x2 (response only, not a command)
530 */
531struct iwl_error_resp {
532 __le32 error_type;
533 u8 cmd_id;
534 u8 reserved1;
535 __le16 bad_cmd_seq_num;
536 __le32 error_info;
537 __le64 timestamp;
538} __packed;
539
540/******************************************************************************
541 * (1)
542 * RXON Commands & Responses:
543 *
544 *****************************************************************************/
545
546/*
547 * Rx config defines & structure
548 */
549/* rx_config device types */
550enum {
551 RXON_DEV_TYPE_AP = 1,
552 RXON_DEV_TYPE_ESS = 3,
553 RXON_DEV_TYPE_IBSS = 4,
554 RXON_DEV_TYPE_SNIFFER = 6,
555};
556
557
558#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
559#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
560#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
561#define RXON_RX_CHAIN_VALID_POS (1)
562#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
563#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
564#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
565#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
566#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
567#define RXON_RX_CHAIN_CNT_POS (10)
568#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
569#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
570#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
571#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
572
573/* rx_config flags */
574/* band & modulation selection */
575#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
576#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
577/* auto detection enable */
578#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
579/* TGg protection when tx */
580#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
581/* cck short slot & preamble */
582#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
583#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
584/* antenna selection */
585#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
586#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
587#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
588#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
589/* radar detection enable */
590#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
591#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
592/* rx response to host with 8-byte TSF
593* (according to ON_AIR deassertion) */
594#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
595
596
597/* HT flags */
598#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
599#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
600
601#define RXON_FLG_HT_OPERATING_MODE_POS (23)
602
603#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23)
604#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23)
605
606#define RXON_FLG_CHANNEL_MODE_POS (25)
607#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
608
609/* channel mode */
610enum {
611 CHANNEL_MODE_LEGACY = 0,
612 CHANNEL_MODE_PURE_40 = 1,
613 CHANNEL_MODE_MIXED = 2,
614 CHANNEL_MODE_RESERVED = 3,
615};
616#define RXON_FLG_CHANNEL_MODE_LEGACY \
617 cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
618#define RXON_FLG_CHANNEL_MODE_PURE_40 \
619 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
620#define RXON_FLG_CHANNEL_MODE_MIXED \
621 cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
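
/*
 * Editor's illustrative sketch, not part of the original header: the channel
 * mode is a two-bit field inside the RXON flags, so it is cleared with the
 * mask before a new mode value is OR-ed in.  Hypothetical helper.
 */
static inline void iwl_example_set_channel_mode(__le32 *flags, __le32 mode)
{
	*flags &= ~RXON_FLG_CHANNEL_MODE_MSK;
	*flags |= mode;		/* e.g. RXON_FLG_CHANNEL_MODE_MIXED */
}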
622
623/* CTS to self (if spec allows) flag */
624#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
625
626/* rx_config filter flags */
627/* accept all data frames */
628#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
629/* pass control & management to host */
630#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
631/* accept multi-cast */
632#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
633/* don't decrypt uni-cast frames */
634#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
635/* don't decrypt multi-cast frames */
636#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
637/* STA is associated */
638#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
639/* transfer to host non bssid beacons in associated state */
640#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
641
642/**
643 * REPLY_RXON = 0x10 (command, has simple generic response)
644 *
645 * RXON tunes the radio tuner to a service channel, and sets up a number
646 * of parameters that are used primarily for Rx, but also for Tx operations.
647 *
648 * NOTE: When tuning to a new channel, driver must set the
649 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
650 * info within the device, including the station tables, tx retry
651 * rate tables, and txpower tables. Driver must build a new station
652 * table and txpower table before transmitting anything on the RXON
653 * channel.
654 *
655 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
656 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
657 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
658 */
659
660struct iwl3945_rxon_cmd {
661 u8 node_addr[6];
662 __le16 reserved1;
663 u8 bssid_addr[6];
664 __le16 reserved2;
665 u8 wlap_bssid_addr[6];
666 __le16 reserved3;
667 u8 dev_type;
668 u8 air_propagation;
669 __le16 reserved4;
670 u8 ofdm_basic_rates;
671 u8 cck_basic_rates;
672 __le16 assoc_id;
673 __le32 flags;
674 __le32 filter_flags;
675 __le16 channel;
676 __le16 reserved5;
677} __packed;
678
679struct iwl4965_rxon_cmd {
680 u8 node_addr[6];
681 __le16 reserved1;
682 u8 bssid_addr[6];
683 __le16 reserved2;
684 u8 wlap_bssid_addr[6];
685 __le16 reserved3;
686 u8 dev_type;
687 u8 air_propagation;
688 __le16 rx_chain;
689 u8 ofdm_basic_rates;
690 u8 cck_basic_rates;
691 __le16 assoc_id;
692 __le32 flags;
693 __le32 filter_flags;
694 __le16 channel;
695 u8 ofdm_ht_single_stream_basic_rates;
696 u8 ofdm_ht_dual_stream_basic_rates;
697} __packed;
698
699/* Create a common rxon cmd which will be typecast into the 3945 or 4965
700 * specific rxon cmd, depending on where it is called from.
701 */
702struct iwl_legacy_rxon_cmd {
703 u8 node_addr[6];
704 __le16 reserved1;
705 u8 bssid_addr[6];
706 __le16 reserved2;
707 u8 wlap_bssid_addr[6];
708 __le16 reserved3;
709 u8 dev_type;
710 u8 air_propagation;
711 __le16 rx_chain;
712 u8 ofdm_basic_rates;
713 u8 cck_basic_rates;
714 __le16 assoc_id;
715 __le32 flags;
716 __le32 filter_flags;
717 __le16 channel;
718 u8 ofdm_ht_single_stream_basic_rates;
719 u8 ofdm_ht_dual_stream_basic_rates;
720 u8 reserved4;
721 u8 reserved5;
722} __packed;
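
/*
 * Editor's illustrative sketch, not part of the original header: common code
 * operates on the iwl_legacy_rxon_cmd layout (e.g. a context's staging RXON,
 * as in the RXON handling earlier in this series) and device-specific code
 * views the same bytes through its own structure.  The 3945 path would cast
 * to struct iwl3945_rxon_cmd in the same way.  Hypothetical helper.
 */
static inline struct iwl4965_rxon_cmd *
iwl_example_rxon_as_4965(struct iwl_legacy_rxon_cmd *rxon)
{
	return (struct iwl4965_rxon_cmd *)rxon;
}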
723
724
725/*
726 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
727 */
728struct iwl3945_rxon_assoc_cmd {
729 __le32 flags;
730 __le32 filter_flags;
731 u8 ofdm_basic_rates;
732 u8 cck_basic_rates;
733 __le16 reserved;
734} __packed;
735
736struct iwl4965_rxon_assoc_cmd {
737 __le32 flags;
738 __le32 filter_flags;
739 u8 ofdm_basic_rates;
740 u8 cck_basic_rates;
741 u8 ofdm_ht_single_stream_basic_rates;
742 u8 ofdm_ht_dual_stream_basic_rates;
743 __le16 rx_chain_select_flags;
744 __le16 reserved;
745} __packed;
746
747#define IWL_CONN_MAX_LISTEN_INTERVAL 10
748#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
749#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
750
751/*
752 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
753 */
754struct iwl_rxon_time_cmd {
755 __le64 timestamp;
756 __le16 beacon_interval;
757 __le16 atim_window;
758 __le32 beacon_init_val;
759 __le16 listen_interval;
760 u8 dtim_period;
761 u8 delta_cp_bss_tbtts;
762} __packed;
763
764/*
765 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
766 */
767struct iwl3945_channel_switch_cmd {
768 u8 band;
769 u8 expect_beacon;
770 __le16 channel;
771 __le32 rxon_flags;
772 __le32 rxon_filter_flags;
773 __le32 switch_time;
774 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
775} __packed;
776
777struct iwl4965_channel_switch_cmd {
778 u8 band;
779 u8 expect_beacon;
780 __le16 channel;
781 __le32 rxon_flags;
782 __le32 rxon_filter_flags;
783 __le32 switch_time;
784 struct iwl4965_tx_power_db tx_power;
785} __packed;
786
787/*
788 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
789 */
790struct iwl_csa_notification {
791 __le16 band;
792 __le16 channel;
793 __le32 status; /* 0 - OK, 1 - fail */
794} __packed;
795
796/******************************************************************************
797 * (2)
798 * Quality-of-Service (QOS) Commands & Responses:
799 *
800 *****************************************************************************/
801
802/**
803 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
804 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
805 *
806 * @cw_min: Contention window, start value in numbers of slots.
807 * Should be a power-of-2, minus 1. Device's default is 0x0f.
808 * @cw_max: Contention window, max value in numbers of slots.
809 * Should be a power-of-2, minus 1. Device's default is 0x3f.
810 * @aifsn: Number of slots in Arbitration Interframe Space (before
811 * performing random backoff timing prior to Tx). Device default 1.
812 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
813 *
814 * Device will automatically increase contention window by (2*CW) + 1 for each
815 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
816 * value, to cap the CW value.
817 */
818struct iwl_ac_qos {
819 __le16 cw_min;
820 __le16 cw_max;
821 u8 aifsn;
822 u8 reserved1;
823 __le16 edca_txop;
824} __packed;
825
826/* QoS flags defines */
827#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
828#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
829#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
830
831/* Number of Access Categories (AC) (EDCA), queues 0..3 */
832#define AC_NUM 4
833
834/*
835 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
836 *
837 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
838 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
839 */
840struct iwl_qosparam_cmd {
841 __le32 qos_flags;
842 struct iwl_ac_qos ac[AC_NUM];
843} __packed;
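
/*
 * Editor's illustrative sketch, not part of the original header: filling one
 * EDCA access category of the command from host-order parameters.  The txop
 * argument is assumed to already be in microseconds, per the field
 * description above; "ac" must be below AC_NUM.  Hypothetical helper.
 */
static inline void iwl_example_fill_ac(struct iwl_qosparam_cmd *cmd, int ac,
				       u16 cw_min, u16 cw_max, u8 aifsn,
				       u16 txop_usec)
{
	cmd->ac[ac].cw_min = cpu_to_le16(cw_min);
	cmd->ac[ac].cw_max = cpu_to_le16(cw_max);
	cmd->ac[ac].aifsn = aifsn;
	cmd->ac[ac].edca_txop = cpu_to_le16(txop_usec);
}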
844
845/******************************************************************************
846 * (3)
847 * Add/Modify Stations Commands & Responses:
848 *
849 *****************************************************************************/
850/*
851 * Multi station support
852 */
853
854/* Special, dedicated locations within device's station table */
855#define IWL_AP_ID 0
856#define IWL_STA_ID 2
857#define IWL3945_BROADCAST_ID 24
858#define IWL3945_STATION_COUNT 25
859#define IWL4965_BROADCAST_ID 31
860#define IWL4965_STATION_COUNT 32
861
862#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
863#define IWL_INVALID_STATION 255
864
865#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
866#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
867#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
868#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
869#define STA_FLG_MAX_AGG_SIZE_POS (19)
870#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19)
871#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21)
872#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22)
873#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
874#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23)
875
876/* Use in mode field. 1: modify existing entry, 0: add new station entry */
877#define STA_CONTROL_MODIFY_MSK 0x01
878
879/* key flags __le16*/
880#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
881#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
882#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
883#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
884#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
885
886#define STA_KEY_FLG_KEYID_POS 8
887#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
888/* wep key is either from global key (0) or from station info array (1) */
889#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008)
890
891/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
892#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
893#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
894#define STA_KEY_MAX_NUM 8
895
896/* Flags indicate whether to modify vs. don't change various station params */
897#define STA_MODIFY_KEY_MASK 0x01
898#define STA_MODIFY_TID_DISABLE_TX 0x02
899#define STA_MODIFY_TX_RATE_MSK 0x04
900#define STA_MODIFY_ADDBA_TID_MSK 0x08
901#define STA_MODIFY_DELBA_TID_MSK 0x10
902#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
903
904/* Receiver address (actually, Rx station's index into station table),
905 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
906#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
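
/*
 * Editor's note (worked example, not part of the original header):
 * BUILD_RAxTID(5, 2) == (5 << 4) + 2 == 0x52, i.e. station table index 5
 * combined with TID 2 in the format the Tx scheduler expects.
 */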
907
908struct iwl4965_keyinfo {
909 __le16 key_flags;
910 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
911 u8 reserved1;
912 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
913 u8 key_offset;
914 u8 reserved2;
915 u8 key[16]; /* 16-byte unicast decryption key */
916} __packed;
917
918/**
919 * struct sta_id_modify
920 * @addr[ETH_ALEN]: station's MAC address
921 * @sta_id: index of station in uCode's station table
922 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
923 *
924 * Driver selects unused table index when adding new station,
925 * or the index to a pre-existing station entry when modifying that station.
926 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
927 *
928 * modify_mask flags select which parameters to modify vs. leave alone.
929 */
930struct sta_id_modify {
931 u8 addr[ETH_ALEN];
932 __le16 reserved1;
933 u8 sta_id;
934 u8 modify_mask;
935 __le16 reserved2;
936} __packed;
937
938/*
939 * REPLY_ADD_STA = 0x18 (command)
940 *
941 * The device contains an internal table of per-station information,
942 * with info on security keys, aggregation parameters, and Tx rates for
943 * initial Tx attempt and any retries (4965 devices uses
944 * REPLY_TX_LINK_QUALITY_CMD,
945 * 3945 uses REPLY_RATE_SCALE to set up rate tables).
946 *
947 * REPLY_ADD_STA sets up the table entry for one station, either creating
948 * a new entry, or modifying a pre-existing one.
949 *
950 * NOTE: RXON command (without "associated" bit set) wipes the station table
951 * clean. Moving into RF_KILL state does this also. Driver must set up
952 * new station table before transmitting anything on the RXON channel
953 * (except active scans or active measurements; those commands carry
954 * their own txpower/rate setup data).
955 *
956 * When getting started on a new channel, driver must set up the
957 * IWL_BROADCAST_ID entry (last entry in the table). For a client
958 * station in a BSS, once an AP is selected, driver sets up the AP STA
959 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
960 * are all that are needed for a BSS client station. If the device is
961 * used as AP, or in an IBSS network, driver must set up station table
962 * entries for all STAs in network, starting with index IWL_STA_ID.
963 */
964
965struct iwl3945_addsta_cmd {
966 u8 mode; /* 1: modify existing, 0: add new station */
967 u8 reserved[3];
968 struct sta_id_modify sta;
969 struct iwl4965_keyinfo key;
970 __le32 station_flags; /* STA_FLG_* */
971 __le32 station_flags_msk; /* STA_FLG_* */
972
973 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
974 * corresponding to bit (e.g. bit 5 controls TID 5).
975 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
976 __le16 tid_disable_tx;
977
978 __le16 rate_n_flags;
979
980 /* TID for which to add block-ack support.
981 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
982 u8 add_immediate_ba_tid;
983
984 /* TID for which to remove block-ack support.
985 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
986 u8 remove_immediate_ba_tid;
987
988 /* Starting Sequence Number for added block-ack support.
989 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
990 __le16 add_immediate_ba_ssn;
991} __packed;
992
993struct iwl4965_addsta_cmd {
994 u8 mode; /* 1: modify existing, 0: add new station */
995 u8 reserved[3];
996 struct sta_id_modify sta;
997 struct iwl4965_keyinfo key;
998 __le32 station_flags; /* STA_FLG_* */
999 __le32 station_flags_msk; /* STA_FLG_* */
1000
1001 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
1002 * corresponding to bit (e.g. bit 5 controls TID 5).
1003 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1004 __le16 tid_disable_tx;
1005
1006 __le16 reserved1;
1007
1008 /* TID for which to add block-ack support.
1009 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1010 u8 add_immediate_ba_tid;
1011
1012 /* TID for which to remove block-ack support.
1013 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1014 u8 remove_immediate_ba_tid;
1015
1016 /* Starting Sequence Number for added block-ack support.
1017 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1018 __le16 add_immediate_ba_ssn;
1019
1020 /*
1021 * Number of packets OK to transmit to station even though
1022 * it is asleep -- used to synchronise PS-poll and u-APSD
1023 * responses while ucode keeps track of STA sleep state.
1024 */
1025 __le16 sleep_tx_count;
1026
1027 __le16 reserved2;
1028} __packed;
1029
1030/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
1031struct iwl_legacy_addsta_cmd {
1032 u8 mode; /* 1: modify existing, 0: add new station */
1033 u8 reserved[3];
1034 struct sta_id_modify sta;
1035 struct iwl4965_keyinfo key;
1036 __le32 station_flags; /* STA_FLG_* */
1037 __le32 station_flags_msk; /* STA_FLG_* */
1038
1039 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
1040 * corresponding to bit (e.g. bit 5 controls TID 5).
1041 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1042 __le16 tid_disable_tx;
1043
1044 __le16 rate_n_flags; /* 3945 only */
1045
1046 /* TID for which to add block-ack support.
1047 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1048 u8 add_immediate_ba_tid;
1049
1050 /* TID for which to remove block-ack support.
1051 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1052 u8 remove_immediate_ba_tid;
1053
1054 /* Starting Sequence Number for added block-ack support.
1055 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1056 __le16 add_immediate_ba_ssn;
1057
1058 /*
1059 * Number of packets OK to transmit to station even though
1060 * it is asleep -- used to synchronise PS-poll and u-APSD
1061 * responses while ucode keeps track of STA sleep state.
1062 */
1063 __le16 sleep_tx_count;
1064
1065 __le16 reserved2;
1066} __packed;
1067
1068
1069#define ADD_STA_SUCCESS_MSK 0x1
1070#define ADD_STA_NO_ROOM_IN_TABLE 0x2
1071#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
1072#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
1073/*
1074 * REPLY_ADD_STA = 0x18 (response)
1075 */
1076struct iwl_add_sta_resp {
1077 u8 status; /* ADD_STA_* */
1078} __packed;
1079
1080#define REM_STA_SUCCESS_MSK 0x1
1081/*
1082 * REPLY_REM_STA = 0x19 (response)
1083 */
1084struct iwl_rem_sta_resp {
1085 u8 status;
1086} __packed;
1087
1088/*
1089 * REPLY_REM_STA = 0x19 (command)
1090 */
1091struct iwl_rem_sta_cmd {
1092 u8 num_sta; /* number of removed stations */
1093 u8 reserved[3];
1094 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
1095 u8 reserved2[2];
1096} __packed;
1097
1098#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
1099#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
1100#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
1101#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
1102#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1103
1104#define IWL_DROP_SINGLE 0
1105#define IWL_DROP_SELECTED 1
1106#define IWL_DROP_ALL 2
1107
1108/*
1109 * REPLY_WEP_KEY = 0x20
1110 */
1111struct iwl_wep_key {
1112 u8 key_index;
1113 u8 key_offset;
1114 u8 reserved1[2];
1115 u8 key_size;
1116 u8 reserved2[3];
1117 u8 key[16];
1118} __packed;
1119
1120struct iwl_wep_cmd {
1121 u8 num_keys;
1122 u8 global_key_type;
1123 u8 flags;
1124 u8 reserved;
1125 struct iwl_wep_key key[0];
1126} __packed;
1127
1128#define WEP_KEY_WEP_TYPE 1
1129#define WEP_KEYS_MAX 4
1130#define WEP_INVALID_OFFSET 0xff
1131#define WEP_KEY_LEN_64 5
1132#define WEP_KEY_LEN_128 13
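
/*
 * Editor's illustrative sketch, not part of the original header: the WEP
 * command ends in a flexible key array, so the host command length depends
 * on how many keys are carried.  Hypothetical helper.
 */
static inline size_t iwl_example_wep_cmd_size(u8 num_keys)
{
	return sizeof(struct iwl_wep_cmd) +
	       num_keys * sizeof(struct iwl_wep_key);
}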
1133
1134/******************************************************************************
1135 * (4)
1136 * Rx Responses:
1137 *
1138 *****************************************************************************/
1139
1140#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
1141#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
1142
1143#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
1144#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
1145#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
1146#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
1147#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
1148#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
1149
1150#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
1151#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
1152#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
1153#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
1154#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
1155#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8)
1156
1157#define RX_RES_STATUS_STATION_FOUND (1<<6)
1158#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
1159
1160#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
1161#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
1162#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
1163#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
1164#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
1165
1166#define RX_MPDU_RES_STATUS_ICV_OK (0x20)
1167#define RX_MPDU_RES_STATUS_MIC_OK (0x40)
1168#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1169#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
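
/*
 * Editor's illustrative sketch, not part of the original header: typical
 * tests of the Rx security/decryption status bits defined above, applied to
 * a host-order status word from the Rx response.  Helper names are
 * hypothetical.
 */
static inline bool iwl_example_rx_decrypt_ok(u32 status)
{
	return (status & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
	       RX_RES_STATUS_DECRYPT_OK;
}

static inline bool iwl_example_rx_tkip_mic_failed(u32 status)
{
	return (status & RX_RES_STATUS_SEC_TYPE_MSK) ==
		       RX_RES_STATUS_SEC_TYPE_TKIP &&
	       !(status & RX_MPDU_RES_STATUS_MIC_OK);
}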
1170
1171
1172struct iwl3945_rx_frame_stats {
1173 u8 phy_count;
1174 u8 id;
1175 u8 rssi;
1176 u8 agc;
1177 __le16 sig_avg;
1178 __le16 noise_diff;
1179 u8 payload[0];
1180} __packed;
1181
1182struct iwl3945_rx_frame_hdr {
1183 __le16 channel;
1184 __le16 phy_flags;
1185 u8 reserved1;
1186 u8 rate;
1187 __le16 len;
1188 u8 payload[0];
1189} __packed;
1190
1191struct iwl3945_rx_frame_end {
1192 __le32 status;
1193 __le64 timestamp;
1194 __le32 beacon_timestamp;
1195} __packed;
1196
1197/*
1198 * REPLY_3945_RX = 0x1b (response only, not a command)
1199 *
1200 * NOTE: do NOT dereference fields through a cast to this structure.
1201 * It is provided only for calculating minimum data set size.
1202 * The actual offsets of the hdr and end are dynamic based on
1203 * stats.phy_count
1204 */
1205struct iwl3945_rx_frame {
1206 struct iwl3945_rx_frame_stats stats;
1207 struct iwl3945_rx_frame_hdr hdr;
1208 struct iwl3945_rx_frame_end end;
1209} __packed;
1210
1211#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
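
/*
 * Editor's illustrative sketch, not part of the original header: because a
 * variable number of PHY bytes (stats.phy_count) sits between the fixed
 * parts, the hdr structure is located at run time rather than through the
 * struct above.  Hypothetical helper.
 */
static inline struct iwl3945_rx_frame_hdr *
iwl_example_rx_frame_hdr(struct iwl3945_rx_frame_stats *stats)
{
	return (struct iwl3945_rx_frame_hdr *)(stats->payload +
					       stats->phy_count);
}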
1212
1213/* Fixed (non-configurable) rx data from phy */
1214
1215#define IWL49_RX_RES_PHY_CNT 14
1216#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1217#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1218#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1219#define IWL49_AGC_DB_POS (7)
1220struct iwl4965_rx_non_cfg_phy {
1221 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1222 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1223 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1224 u8 pad[0];
1225} __packed;
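
/*
 * Editor's illustrative sketch, not part of the original header: extracting
 * the AGC reading from agc_info with the mask/shift constants above.
 * Hypothetical helper.
 */
static inline u8 iwl_example_get_agc(struct iwl4965_rx_non_cfg_phy *ncphy)
{
	return (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) >>
	       IWL49_AGC_DB_POS;
}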
1226
1227
1228/*
1229 * REPLY_RX = 0xc3 (response only, not a command)
1230 * Used only for legacy (non 11n) frames.
1231 */
1232struct iwl_rx_phy_res {
1233 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1234 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1235 u8 stat_id; /* configurable DSP phy data set ID */
1236 u8 reserved1;
1237 __le64 timestamp; /* TSF at on air rise */
1238 __le32 beacon_time_stamp; /* beacon at on-air rise */
1239 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1240 __le16 channel; /* channel number */
1241 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1242 __le32 rate_n_flags; /* RATE_MCS_* */
1243 __le16 byte_count; /* frame's byte-count */
1244 __le16 frame_time; /* frame's time on the air */
1245} __packed;
1246
1247struct iwl_rx_mpdu_res_start {
1248 __le16 byte_count;
1249 __le16 reserved;
1250} __packed;
1251
1252
1253/******************************************************************************
1254 * (5)
1255 * Tx Commands & Responses:
1256 *
1257 * Driver must place each REPLY_TX command into one of the prioritized Tx
1258 * queues in host DRAM, shared between driver and device (see comments for
1259 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1260 * are preparing to transmit, the device pulls the Tx command over the PCI
1261 * bus via one of the device's Tx DMA channels, to fill an internal FIFO
1262 * from which data will be transmitted.
1263 *
1264 * uCode handles all timing and protocol related to control frames
1265 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1266 * handle reception of block-acks; uCode updates the host driver via
1267 * REPLY_COMPRESSED_BA.
1268 *
1269 * uCode handles retrying Tx when an ACK is expected but not received.
1270 * This includes trying lower data rates than the one requested in the Tx
1271 * command, as set up by the REPLY_RATE_SCALE (for 3945) or
1272 * REPLY_TX_LINK_QUALITY_CMD (4965).
1273 *
1274 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1275 * This command must be executed after every RXON command, before Tx can occur.
1276 *****************************************************************************/
1277
1278/* REPLY_TX Tx flags field */
1279
1280/*
1281 * 1: Use Request-To-Send protocol before this frame.
1282 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
1283 */
1284#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
1285
1286/*
1287 * 1: Transmit Clear-To-Send to self before this frame.
1288 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
1289 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
1290 */
1291#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
1292
1293/* 1: Expect ACK from receiving station
1294 * 0: Don't expect ACK (MAC header's duration field s/b 0)
1295 * Set this for unicast frames, but not broadcast/multicast. */
1296#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1297
1298/* For 4965 devices:
1299 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
1300 * Tx command's initial_rate_index indicates first rate to try;
1301 * uCode walks through table for additional Tx attempts.
1302 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
1303 * This rate will be used for all Tx attempts; it will not be scaled. */
1304#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
1305
1306/* 1: Expect immediate block-ack.
1307 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
1308#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
1309
1310/*
1311 * 1: Frame requires full Tx-Op protection.
1312 * Set this if either RTS or CTS Tx Flag gets set.
1313 */
1314#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
1315
1316/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
1317 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
1318#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
1319#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
1320#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
1321
1322/* 1: uCode overrides sequence control field in MAC header.
1323 * 0: Driver provides sequence control field in MAC header.
1324 * Set this for management frames, non-QOS data frames, non-unicast frames,
1325 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
1326#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
1327
1328/* 1: This frame is non-last MPDU; more fragments are coming.
1329 * 0: Last fragment, or not using fragmentation. */
1330#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
1331
1332/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
1333 * 0: No TSF required in outgoing frame.
1334 * Set this for transmitting beacons and probe responses. */
1335#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
1336
1337/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
1338 * alignment of frame's payload data field.
1339 * 0: No pad
1340 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
1341 * field (but not both). Driver must align frame data (i.e. data following
1342 * MAC header) to DWORD boundary. */
1343#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
1344
1345/* accelerate aggregation support
1346 * 0 - no CCMP encryption; 1 - CCMP encryption */
1347#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
1348
1349/* HCCA-AP - disable duration overwriting. */
1350#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
1351
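/*
 * Illustrative sketch: a typical tx_flags combination for a unicast data
 * frame on 4965 -- expect an ACK and let uCode walk the link-quality
 * rate table.  The exact flag set is an example only; the driver derives
 * it per frame, and the helper name is made up.
 */
static inline __le32 example_data_frame_tx_flags(bool more_frags)
{
        __le32 tx_flags = TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_STA_RATE_MSK;

        if (more_frags)         /* more fragments of this MSDU follow */
                tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

        return tx_flags;
}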
1352
1353/*
1354 * TX command security control
1355 */
1356#define TX_CMD_SEC_WEP 0x01
1357#define TX_CMD_SEC_CCM 0x02
1358#define TX_CMD_SEC_TKIP 0x03
1359#define TX_CMD_SEC_MSK 0x03
1360#define TX_CMD_SEC_SHIFT 6
1361#define TX_CMD_SEC_KEY128 0x08
1362
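/*
 * Illustrative sketch: building sec_ctl for WEP.  The cipher type sits
 * in the low bits, the key index is shifted up by TX_CMD_SEC_SHIFT, and
 * TX_CMD_SEC_KEY128 marks a 104-bit key.  The helper name and the
 * standalone form are made up for this example.
 */
static inline u8 example_wep_sec_ctl(u8 key_idx, bool is_wep104)
{
        u8 sec_ctl = TX_CMD_SEC_WEP |
                     ((key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

        if (is_wep104)
                sec_ctl |= TX_CMD_SEC_KEY128;

        return sec_ctl;
}
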
1363/*
1364 * security overhead sizes
1365 */
1366#define WEP_IV_LEN 4
1367#define WEP_ICV_LEN 4
1368#define CCMP_MIC_LEN 8
1369#define TKIP_ICV_LEN 4
1370
1371/*
1372 * REPLY_TX = 0x1c (command)
1373 */
1374
1375struct iwl3945_tx_cmd {
1376 /*
1377 * MPDU byte count:
1378 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1379 * + 8 byte IV for CCM or TKIP (not used for WEP)
1380 * + Data payload
1381 * + 8-byte MIC (not used for CCM/WEP)
1382 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1383 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1384 * Range: 14-2342 bytes.
1385 */
1386 __le16 len;
1387
1388 /*
1389 * MPDU or MSDU byte count for next frame.
1390 * Used for fragmentation and bursting, but not 11n aggregation.
1391 * Same as "len", but for next frame. Set to 0 if not applicable.
1392 */
1393 __le16 next_frame_len;
1394
1395 __le32 tx_flags; /* TX_CMD_FLG_* */
1396
1397 u8 rate;
1398
1399 /* Index of recipient station in uCode's station table */
1400 u8 sta_id;
1401 u8 tid_tspec;
1402 u8 sec_ctl;
1403 u8 key[16];
1404 union {
1405 u8 byte[8];
1406 __le16 word[4];
1407 __le32 dw[2];
1408 } tkip_mic;
1409 __le32 next_frame_info;
1410 union {
1411 __le32 life_time;
1412 __le32 attempt;
1413 } stop_time;
1414 u8 supp_rates[2];
1415 u8 rts_retry_limit; /*byte 50 */
1416 u8 data_retry_limit; /*byte 51 */
1417 union {
1418 __le16 pm_frame_timeout;
1419 __le16 attempt_duration;
1420 } timeout;
1421
1422 /*
1423 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1424 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1425 */
1426 __le16 driver_txop;
1427
1428 /*
1429 * MAC header goes here, followed by 2 bytes padding if MAC header
1430 * length is 26 or 30 bytes, followed by payload data
1431 */
1432 u8 payload[0];
1433 struct ieee80211_hdr hdr[0];
1434} __packed;
1435
1436/*
1437 * REPLY_TX = 0x1c (response)
1438 */
1439struct iwl3945_tx_resp {
1440 u8 failure_rts;
1441 u8 failure_frame;
1442 u8 bt_kill_count;
1443 u8 rate;
1444 __le32 wireless_media_time;
1445 __le32 status; /* TX status */
1446} __packed;
1447
1448
1449/*
1450 * 4965 uCode updates these Tx attempt count values in host DRAM.
1451 * Used for managing Tx retries when expecting block-acks.
1452 * Driver should set these fields to 0.
1453 */
1454struct iwl_dram_scratch {
1455 u8 try_cnt; /* Tx attempts */
1456 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1457 __le16 reserved;
1458} __packed;
1459
1460struct iwl_tx_cmd {
1461 /*
1462 * MPDU byte count:
1463 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1464 * + 8 byte IV for CCM or TKIP (not used for WEP)
1465 * + Data payload
1466 * + 8-byte MIC (not used for CCM/WEP)
1467 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1468 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1469 * Range: 14-2342 bytes.
1470 */
1471 __le16 len;
1472
1473 /*
1474 * MPDU or MSDU byte count for next frame.
1475 * Used for fragmentation and bursting, but not 11n aggregation.
1476 * Same as "len", but for next frame. Set to 0 if not applicable.
1477 */
1478 __le16 next_frame_len;
1479
1480 __le32 tx_flags; /* TX_CMD_FLG_* */
1481
1482 /* uCode may modify this field of the Tx command (in host DRAM!).
1483 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
1484 struct iwl_dram_scratch scratch;
1485
1486 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
1487 __le32 rate_n_flags; /* RATE_MCS_* */
1488
1489 /* Index of destination station in uCode's station table */
1490 u8 sta_id;
1491
1492 /* Type of security encryption: CCM or TKIP */
1493 u8 sec_ctl; /* TX_CMD_SEC_* */
1494
1495 /*
1496 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
1497 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
1498 * data frames, this field may be used to selectively reduce initial
1499 * rate (via non-0 value) for special frames (e.g. management), while
1500 * still supporting rate scaling for all frames.
1501 */
1502 u8 initial_rate_index;
1503 u8 reserved;
1504 u8 key[16];
1505 __le16 next_frame_flags;
1506 __le16 reserved2;
1507 union {
1508 __le32 life_time;
1509 __le32 attempt;
1510 } stop_time;
1511
1512 /* Host DRAM physical address pointer to "scratch" in this command.
1513 * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
1514 __le32 dram_lsb_ptr;
1515 u8 dram_msb_ptr;
1516
1517 u8 rts_retry_limit; /*byte 50 */
1518 u8 data_retry_limit; /*byte 51 */
1519 u8 tid_tspec;
1520 union {
1521 __le16 pm_frame_timeout;
1522 __le16 attempt_duration;
1523 } timeout;
1524
1525 /*
1526 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1527 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1528 */
1529 __le16 driver_txop;
1530
1531 /*
1532 * MAC header goes here, followed by 2 bytes padding if MAC header
1533 * length is 26 or 30 bytes, followed by payload data
1534 */
1535 u8 payload[0];
1536 struct ieee80211_hdr hdr[0];
1537} __packed;
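
/*
 * Illustrative sketch: pointing dram_lsb_ptr/dram_msb_ptr at the scratch
 * area inside this same Tx command, given the DMA address of the command
 * in host memory.  The 32+4 bit split is an assumption based on the
 * field sizes above; the helper name is made up for this example.
 */
static inline void example_set_scratch_ptr(struct iwl_tx_cmd *tx_cmd,
                                           dma_addr_t txcmd_phys)
{
        dma_addr_t scratch_phys = txcmd_phys +
                                  offsetof(struct iwl_tx_cmd, scratch);

        tx_cmd->dram_lsb_ptr = cpu_to_le32(lower_32_bits(scratch_phys));
        tx_cmd->dram_msb_ptr = upper_32_bits(scratch_phys) & 0xf;
}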
1538
1539/* TX command response is sent after *3945* transmission attempts.
1540 *
1541 * NOTES:
1542 *
1543 * TX_STATUS_FAIL_NEXT_FRAG
1544 *
1545 * If the fragment flag in the MAC header for the frame being transmitted
1546 * is set and there is insufficient time to transmit the next frame, the
1547 * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
1548 *
1549 * TX_STATUS_FIFO_UNDERRUN
1550 *
1551 * Indicates the host did not provide bytes to the FIFO fast enough while
1552 * a TX was in progress.
1553 *
1554 * TX_STATUS_FAIL_MGMNT_ABORT
1555 *
1556 * This status is only possible if the ABORT ON MGMT RX parameter was
1557 * set to true with the TX command.
1558 *
1559 * If the MSB of the status parameter is set then an abort sequence is
1560 * required. This sequence consists of the host activating the TX Abort
1561 * control line, and then waiting for the TX Abort command response. This
1562 * indicates that the device is no longer in a transmit state, and that the
1563 * command FIFO has been cleared. The host must then deactivate the TX Abort
1564 * control line. Receiving is still allowed in this case.
1565 */
1566enum {
1567 TX_3945_STATUS_SUCCESS = 0x01,
1568 TX_3945_STATUS_DIRECT_DONE = 0x02,
1569 TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
1570 TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
1571 TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1572 TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
1573 TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
1574 TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1575 TX_3945_STATUS_FAIL_DEST_PS = 0x88,
1576 TX_3945_STATUS_FAIL_ABORTED = 0x89,
1577 TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
1578 TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
1579 TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1580 TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
1581 TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
1582 TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1583 TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
1584 TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1585};
1586
1587/*
1588 * TX command response is sent after *4965* transmission attempts.
1589 *
1590 * Both postpone and abort status are expected behavior from uCode. There is
1591 * no special operation required from the driver, except for RFKILL_FLUSH,
1592 * which requires a tx flush host command to flush all tx frames in the queues.
1593 */
1594enum {
1595 TX_STATUS_SUCCESS = 0x01,
1596 TX_STATUS_DIRECT_DONE = 0x02,
1597 /* postpone TX */
1598 TX_STATUS_POSTPONE_DELAY = 0x40,
1599 TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
1600 TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
1601 TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
1602 /* abort TX */
1603 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
1604 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
1605 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
1606 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1607 TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
1608 TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
1609 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1610 TX_STATUS_FAIL_DEST_PS = 0x88,
1611 TX_STATUS_FAIL_HOST_ABORTED = 0x89,
1612 TX_STATUS_FAIL_BT_RETRY = 0x8a,
1613 TX_STATUS_FAIL_STA_INVALID = 0x8b,
1614 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1615 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1616 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1617 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1618 TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
1619 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1620};
1621
1622#define TX_PACKET_MODE_REGULAR 0x0000
1623#define TX_PACKET_MODE_BURST_SEQ 0x0100
1624#define TX_PACKET_MODE_BURST_FIRST 0x0200
1625
1626enum {
1627 TX_POWER_PA_NOT_ACTIVE = 0x0,
1628};
1629
1630enum {
1631 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
1632 TX_STATUS_DELAY_MSK = 0x00000040,
1633 TX_STATUS_ABORT_MSK = 0x00000080,
1634 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
1635 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
1636 TX_RESERVED = 0x00780000, /* bits 19:22 */
1637 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
1638 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1639};
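
/*
 * Illustrative sketch: decoding a non-aggregation Tx status word with
 * the masks above.  The helper name is made up for this example.
 */
static inline bool example_tx_frame_ok(__le32 status_le)
{
        u32 status = le32_to_cpu(status_le) & TX_STATUS_MSK;

        return status == TX_STATUS_SUCCESS ||
               status == TX_STATUS_DIRECT_DONE;
}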
1640
1641/* *******************************
1642 * TX aggregation status
1643 ******************************* */
1644
1645enum {
1646 AGG_TX_STATE_TRANSMITTED = 0x00,
1647 AGG_TX_STATE_UNDERRUN_MSK = 0x01,
1648 AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
1649 AGG_TX_STATE_ABORT_MSK = 0x08,
1650 AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
1651 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
1652 AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
1653 AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
1654 AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
1655 AGG_TX_STATE_DUMP_TX_MSK = 0x200,
1656 AGG_TX_STATE_DELAY_TX_MSK = 0x400
1657};
1658
1659#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
1660#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
1661
1662#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
1663 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)
1664
1665/* # tx attempts for first frame in aggregation */
1666#define AGG_TX_STATE_TRY_CNT_POS 12
1667#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
1668
1669/* Command ID and sequence number of Tx command for this frame */
1670#define AGG_TX_STATE_SEQ_NUM_POS 16
1671#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
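
/*
 * Illustrative sketch: splitting one per-frame aggregation status word
 * (the 32-bit layout described for iwl4965_tx_resp below) into its three
 * fields.  The helper and parameter names are made up for this example.
 */
static inline void example_parse_agg_status(u32 word, u16 *state,
                                            u8 *try_cnt, u16 *seq)
{
        *state   = word & AGG_TX_STATUS_MSK;
        *try_cnt = (word & AGG_TX_STATE_TRY_CNT_MSK) >>
                   AGG_TX_STATE_TRY_CNT_POS;
        *seq     = (word & AGG_TX_STATE_SEQ_NUM_MSK) >>
                   AGG_TX_STATE_SEQ_NUM_POS;
}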
1672
1673/*
1674 * REPLY_TX = 0x1c (response)
1675 *
1676 * This response may be in one of two slightly different formats, indicated
1677 * by the frame_count field:
1678 *
1679 * 1) No aggregation (frame_count == 1). This reports Tx results for
1680 * a single frame. Multiple attempts, at various bit rates, may have
1681 * been made for this frame.
1682 *
1683 * 2) Aggregation (frame_count > 1). This reports Tx results for
1684 * 2 or more frames that used block-acknowledge. All frames were
1685 * transmitted at same rate. Rate scaling may have been used if first
1686 * frame in this new agg block failed in previous agg block(s).
1687 *
1688 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
1689 * block-ack has not been received by the time the 4965 device records
1690 * this status.
1691 * This status relates to reasons the tx might have been blocked or aborted
1692 * within the sending station (this 4965 device), rather than whether it was
1693 * received successfully by the destination station.
1694 */
1695struct agg_tx_status {
1696 __le16 status;
1697 __le16 sequence;
1698} __packed;
1699
1700struct iwl4965_tx_resp {
1701 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1702 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1703 u8 failure_rts; /* # failures due to unsuccessful RTS */
1704 u8 failure_frame; /* # failures due to no ACK (unused for agg) */
1705
1706 /* For non-agg: Rate at which frame was successful.
1707 * For agg: Rate at which all frames were transmitted. */
1708 __le32 rate_n_flags; /* RATE_MCS_* */
1709
1710 /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
1711 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1712 __le16 wireless_media_time; /* uSecs */
1713
1714 __le16 reserved;
1715 __le32 pa_power1; /* RF power amplifier measurement (not used) */
1716 __le32 pa_power2;
1717
1718 /*
1719 * For non-agg: frame status TX_STATUS_*
1720 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
1721 * fields follow this one, up to frame_count.
1722 * Bit fields:
1723 * 11- 0: AGG_TX_STATE_* status code
1724 * 15-12: Retry count for 1st frame in aggregation (retries
1725 * occur if tx failed for this frame when it was a
1726 * member of a previous aggregation block). If rate
1727 * scaling is used, retry count indicates the rate
1728 * table entry used for all frames in the new agg.
1729 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1730 */
1731 union {
1732 __le32 status;
1733 struct agg_tx_status agg_status[0]; /* for each agg frame */
1734 } u;
1735} __packed;
1736
1737/*
1738 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1739 *
1740 * Reports Block-Acknowledge from recipient station
1741 */
1742struct iwl_compressed_ba_resp {
1743 __le32 sta_addr_lo32;
1744 __le16 sta_addr_hi16;
1745 __le16 reserved;
1746
1747 /* Index of recipient (BA-sending) station in uCode's station table */
1748 u8 sta_id;
1749 u8 tid;
1750 __le16 seq_ctl;
1751 __le64 bitmap;
1752 __le16 scd_flow;
1753 __le16 scd_ssn;
1754} __packed;
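
/*
 * Illustrative sketch: counting how many MPDUs the recipient acknowledged
 * in a compressed BA by popcounting the 64-bit bitmap.  A real driver
 * additionally shifts the bitmap to align it with its Tx window; that
 * step is omitted here, and the helper name is made up.
 */
static inline unsigned int
example_ba_acked_count(const struct iwl_compressed_ba_resp *ba)
{
        return hweight64(le64_to_cpu(ba->bitmap));
}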
1755
1756/*
1757 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
1758 *
1759 * See details under "TXPOWER" in iwl-4965-hw.h.
1760 */
1761
1762struct iwl3945_txpowertable_cmd {
1763 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1764 u8 reserved;
1765 __le16 channel;
1766 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
1767} __packed;
1768
1769struct iwl4965_txpowertable_cmd {
1770 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1771 u8 reserved;
1772 __le16 channel;
1773 struct iwl4965_tx_power_db tx_power;
1774} __packed;
1775
1776
1777/**
1778 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
1779 *
1780 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
1781 *
1782 * NOTE: The table of rates passed to the uCode via the
1783 * RATE_SCALE command sets up the corresponding order of
1784 * rates used for all related commands, including rate
1785 * masks, etc.
1786 *
1787 * For example, if you set 9MB (PLCP 0x0f) as the first
1788 * rate in the rate table, the bit mask for that rate
1789 * when passed through ofdm_basic_rates on the REPLY_RXON
1790 * command would be bit 0 (1 << 0)
1791 */
1792struct iwl3945_rate_scaling_info {
1793 __le16 rate_n_flags;
1794 u8 try_cnt;
1795 u8 next_rate_index;
1796} __packed;
1797
1798struct iwl3945_rate_scaling_cmd {
1799 u8 table_id;
1800 u8 reserved[3];
1801 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
1802} __packed;
1803
1804
1805/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
1806#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1807
1808/* # of EDCA prioritized tx fifos */
1809#define LINK_QUAL_AC_NUM AC_NUM
1810
1811/* # entries in rate scale table to support Tx retries */
1812#define LINK_QUAL_MAX_RETRY_NUM 16
1813
1814/* Tx antenna selection values */
1815#define LINK_QUAL_ANT_A_MSK (1 << 0)
1816#define LINK_QUAL_ANT_B_MSK (1 << 1)
1817#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1818
1819
1820/**
1821 * struct iwl_link_qual_general_params
1822 *
1823 * Used in REPLY_TX_LINK_QUALITY_CMD
1824 */
1825struct iwl_link_qual_general_params {
1826 u8 flags;
1827
1828 /* No entries at or above this (driver chosen) index contain MIMO */
1829 u8 mimo_delimiter;
1830
1831 /* Best single antenna to use for single stream (legacy, SISO). */
1832 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
1833
1834 /* Best antennas to use for MIMO (unused for 4965, assumes both). */
1835 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
1836
1837 /*
1838 * If driver needs to use different initial rates for different
1839 * EDCA QOS access categories (as implemented by tx fifos 0-3),
1840 * this table will set that up, by indicating the indexes in the
1841 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
1842 * Otherwise, driver should set all entries to 0.
1843 *
1844 * Entry usage:
1845 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
1846 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
1847 */
1848 u8 start_rate_index[LINK_QUAL_AC_NUM];
1849} __packed;
1850
1851#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
1852#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
1853#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
1854
1855#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
1856#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
1857#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
1858
1859#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31)
1860#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
1861#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
1862
1863/**
1864 * struct iwl_link_qual_agg_params
1865 *
1866 * Used in REPLY_TX_LINK_QUALITY_CMD
1867 */
1868struct iwl_link_qual_agg_params {
1869
1870 /*
1871 * Maximum number of uSec in aggregation.
1872 * Default is 4000 (4 milliseconds) if not configured in .cfg.
1873 */
1874 __le16 agg_time_limit;
1875
1876 /*
1877 * Number of Tx retries allowed for a frame, before that frame will
1878 * no longer be considered for the start of an aggregation sequence
1879 * (scheduler will then try to tx it as single frame).
1880 * Driver should set this to 3.
1881 */
1882 u8 agg_dis_start_th;
1883
1884 /*
1885 * Maximum number of frames in aggregation.
1886 * 0 = no limit (default). 1 = no aggregation.
1887 * Other values = max # frames in aggregation.
1888 */
1889 u8 agg_frame_cnt_limit;
1890
1891 __le32 reserved;
1892} __packed;
1893
1894/*
1895 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1896 *
1897 * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
1898 *
1899 * Each station in the 4965 device's internal station table has its own table
1900 * of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when
1902 * an ACK is not received. This command replaces the entire table for
1903 * one station.
1904 *
1905 * NOTE: Station must already be in 4965 device's station table.
1906 * Use REPLY_ADD_STA.
1907 *
1908 * The rate scaling procedures described below work well. Of course, other
1909 * procedures are possible, and may work better for particular environments.
1910 *
1911 *
1912 * FILLING THE RATE TABLE
1913 *
1914 * Given a particular initial rate and mode, as determined by the rate
1915 * scaling algorithm described below, the Linux driver uses the following
1916 * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
1917 * Link Quality command:
1918 *
1919 *
1920 * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
1921 * a) Use this same initial rate for first 3 entries.
1922 * b) Find next lower available rate using same mode (SISO or MIMO),
1923 * use for next 3 entries. If no lower rate available, switch to
1924 * legacy mode (no HT40 channel, no MIMO, no short guard interval).
1925 * c) If using MIMO, set command's mimo_delimiter to number of entries
1926 * using MIMO (3 or 6).
1927 * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
1928 * no MIMO, no short guard interval), at the next lower bit rate
1929 * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
1930 * legacy procedure for remaining table entries.
1931 *
1932 * 2) If using legacy initial rate:
1933 * a) Use the initial rate for only one entry.
1934 * b) For each following entry, reduce the rate to next lower available
1935 * rate, until reaching the lowest available rate.
1936 * c) When reducing rate, also switch antenna selection.
1937 * d) Once lowest available rate is reached, repeat this rate until
1938 * rate table is filled (16 entries), switching antenna each entry.
1939 *
1940 *
1941 * ACCUMULATING HISTORY
1942 *
1943 * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
1944 * uses two sets of frame Tx success history: One for the current/active
1945 * modulation mode, and one for a speculative/search mode that is being
1946 * attempted. If the speculative mode turns out to be more effective (i.e.
1947 * actual transfer rate is better), then the driver continues to use the
1948 * speculative mode as the new current active mode.
1949 *
1950 * Each history set contains, separately for each possible rate, data for a
1951 * sliding window of the 62 most recent tx attempts at that rate. The data
1952 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1953 * and attempted frames, from which the driver can additionally calculate a
1954 * success ratio (success / attempted) and number of failures
1955 * (attempted - success), and control the size of the window (attempted).
1956 * The driver uses the bit map to remove successes from the success sum, as
1957 * the oldest tx attempts fall out of the window.
1958 *
1959 * When the 4965 device makes multiple tx attempts for a given frame, each
1960 * attempt might be at a different rate, and have different modulation
1961 * characteristics (e.g. antenna, fat channel, short guard interval), as set
1962 * up in the rate scaling table in the Link Quality command. The driver must
1963 * determine which rate table entry was used for each tx attempt, to determine
1964 * which rate-specific history to update, and record only those attempts that
1965 * match the modulation characteristics of the history set.
1966 *
1967 * When using block-ack (aggregation), all frames are transmitted at the same
1968 * rate, since there is no per-attempt acknowledgment from the destination
1969 * station. The Tx response struct iwl4965_tx_resp indicates the Tx rate in the
1970 * rate_n_flags field. After receiving a block-ack, the driver can update
1971 * history for the entire block all at once.
1972 *
1973 *
1974 * FINDING BEST STARTING RATE:
1975 *
1976 * When working with a selected initial modulation mode (see below), the
1977 * driver attempts to find a best initial rate. The initial rate is the
1978 * first entry in the Link Quality command's rate table.
1979 *
1980 * 1) Calculate actual throughput (success ratio * expected throughput, see
1981 * table below) for current initial rate. Do this only if enough frames
1982 * have been attempted to make the value meaningful: at least 6 failed
1983 * tx attempts, or at least 8 successes. If not enough, don't try rate
1984 * scaling yet.
1985 *
1986 * 2) Find available rates adjacent to current initial rate. Available means:
1987 * a) supported by hardware &&
1988 * b) supported by association &&
1989 * c) within any constraints selected by user
1990 *
1991 * 3) Gather measured throughputs for adjacent rates. These might not have
1992 * enough history to calculate a throughput. That's okay, we might try
1993 * using one of them anyway!
1994 *
1995 * 4) Try decreasing rate if, for current rate:
1996 * a) success ratio is < 15% ||
1997 * b) lower adjacent rate has better measured throughput ||
1998 * c) higher adjacent rate has worse throughput, and lower is unmeasured
1999 *
2000 * As a sanity check, if decrease was determined above, leave rate
2001 * unchanged if:
2002 * a) lower rate unavailable
2003 * b) success ratio at current rate > 85% (very good)
2004 * c) current measured throughput is better than expected throughput
2005 * of lower rate (under perfect 100% tx conditions, see table below)
2006 *
2007 * 5) Try increasing rate if, for current rate:
2008 * a) success ratio is < 15% ||
2009 * b) both adjacent rates' throughputs are unmeasured (try it!) ||
2010 * c) higher adjacent rate has better measured throughput ||
2011 * d) lower adjacent rate has worse throughput, and higher is unmeasured
2012 *
2013 * As a sanity check, if increase was determined above, leave rate
2014 * unchanged if:
2015 * a) success ratio at current rate < 70%. This is not particularly
2016 * good performance; higher rate is sure to have poorer success.
2017 *
2018 * 6) Re-evaluate the rate after each tx frame. If working with block-
2019 * acknowledge, history and statistics may be calculated for the entire
2020 * block (including prior history that fits within the history windows),
2021 * before re-evaluation.
2022 *
2023 * FINDING BEST STARTING MODULATION MODE:
2024 *
2025 * After working with a modulation mode for a "while" (and doing rate scaling),
2026 * the driver searches for a new initial mode in an attempt to improve
2027 * throughput. The "while" is measured by numbers of attempted frames:
2028 *
2029 * For legacy mode, search for new mode after:
2030 * 480 successful frames, or 160 failed frames
2031 * For high-throughput modes (SISO or MIMO), search for new mode after:
2032 * 4500 successful frames, or 400 failed frames
2033 *
2034 * Mode switch possibilities are (3 for each mode):
2035 *
2036 * For legacy:
2037 * Change antenna, try SISO (if HT association), try MIMO (if HT association)
2038 * For SISO:
2039 * Change antenna, try MIMO, try shortened guard interval (SGI)
2040 * For MIMO:
2041 * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
2042 *
2043 * When trying a new mode, use the same bit rate as the old/current mode when
2044 * trying antenna switches and shortened guard interval. When switching to
2045 * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
2046 * for which the expected throughput (under perfect conditions) is about the
2047 * same or slightly better than the actual measured throughput delivered by
2048 * the old/current mode.
2049 *
2050 * Actual throughput can be estimated by multiplying the expected throughput
2051 * by the success ratio (successful / attempted tx frames). Frame size is
2052 * not considered in this calculation; it assumes that frame size will average
2053 * out to be fairly consistent over several samples. The following are
2054 * metric values for expected throughput assuming 100% success ratio.
2055 * Only G band has support for CCK rates:
2056 *
2057 * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
2058 *
2059 * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
2060 * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
2061 * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
2062 * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
2063 * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
2064 * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
2065 * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
2066 * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
2067 * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
2068 * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
2069 *
2070 * After the new mode has been tried for a short while (minimum of 6 failed
2071 * frames or 8 successful frames), compare success ratio and actual throughput
2072 * estimate of the new mode with the old. If either is better with the new
2073 * mode, continue to use the new mode.
2074 *
2075 * Continue comparing modes until all 3 possibilities have been tried.
2076 * If moving from legacy to HT, try all 3 possibilities from the new HT
2077 * mode. After trying all 3, a best mode is found. Continue to use this mode
2078 * for the longer "while" described above (e.g. 480 successful frames for
2079 * legacy), and then repeat the search process.
2080 *
2081 */
2082struct iwl_link_quality_cmd {
2083
2084 /* Index of destination/recipient station in uCode's station table */
2085 u8 sta_id;
2086 u8 reserved1;
2087 __le16 control; /* not used */
2088 struct iwl_link_qual_general_params general_params;
2089 struct iwl_link_qual_agg_params agg_params;
2090
2091 /*
2092 * Rate info; when using rate-scaling, Tx command's initial_rate_index
2093 * specifies 1st Tx rate attempted, via index into this table.
2094 * The 4965 device works its way through this table when retrying Tx.
2095 */
2096 struct {
2097 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
2098 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
2099 __le32 reserved2;
2100} __packed;
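
/*
 * Illustrative sketch: the simplest legal way to fill this command --
 * every retry entry carries the same rate, so no rate scaling happens.
 * A real driver instead steps down the rate table and alternates
 * antennas as described in the long comment above.  rate_n_flags_value
 * stands in for a valid rate_n_flags encoding, and the helper name is
 * made up for this example.
 */
static inline void
example_fill_lq_fixed_rate(struct iwl_link_quality_cmd *lq, u8 sta_id,
                           __le32 rate_n_flags_value)
{
        int i;

        memset(lq, 0, sizeof(*lq));
        lq->sta_id = sta_id;
        lq->general_params.single_stream_ant_msk = LINK_QUAL_ANT_A_MSK;
        lq->general_params.dual_stream_ant_msk = LINK_QUAL_ANT_MSK;
        lq->agg_params.agg_time_limit =
                cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
        lq->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
        lq->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;

        for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
                lq->rs_table[i].rate_n_flags = rate_n_flags_value;
}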
2101
2102/*
2103 * BT configuration enable flags:
2104 * bit 0 - 1: BT channel announcement enabled
2105 * 0: disable
2106 * bit 1 - 1: priority of BT device enabled
2107 * 0: disable
2108 */
2109#define BT_COEX_DISABLE (0x0)
2110#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
2111#define BT_ENABLE_PRIORITY BIT(1)
2112
2113#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
2114
2115#define BT_LEAD_TIME_DEF (0x1E)
2116
2117#define BT_MAX_KILL_DEF (0x5)
2118
2119/*
2120 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
2121 *
2122 * 3945 and 4965 devices support hardware handshake with Bluetooth device on
2123 * same platform. Bluetooth device alerts wireless device when it will Tx;
2124 * wireless device can delay or kill its own Tx to accommodate.
2125 */
2126struct iwl_bt_cmd {
2127 u8 flags;
2128 u8 lead_time;
2129 u8 max_kill;
2130 u8 reserved;
2131 __le32 kill_ack_mask;
2132 __le32 kill_cts_mask;
2133} __packed;
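
/*
 * Illustrative sketch: a default REPLY_BT_CONFIG payload built from the
 * constants above (default lead time and kill count, no ACK/CTS kill
 * masks).  The helper name is made up for this example.
 */
static inline void example_default_bt_cmd(struct iwl_bt_cmd *bt,
                                          bool coex_active)
{
        memset(bt, 0, sizeof(*bt));
        bt->flags = coex_active ? BT_COEX_ENABLE : BT_COEX_DISABLE;
        bt->lead_time = BT_LEAD_TIME_DEF;
        bt->max_kill = BT_MAX_KILL_DEF;
}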
2134
2135
2136/******************************************************************************
2137 * (6)
2138 * Spectrum Management (802.11h) Commands, Responses, Notifications:
2139 *
2140 *****************************************************************************/
2141
2142/*
2143 * Spectrum Management
2144 */
2145#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
2146 RXON_FILTER_CTL2HOST_MSK | \
2147 RXON_FILTER_ACCEPT_GRP_MSK | \
2148 RXON_FILTER_DIS_DECRYPT_MSK | \
2149 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
2150 RXON_FILTER_ASSOC_MSK | \
2151 RXON_FILTER_BCON_AWARE_MSK)
2152
2153struct iwl_measure_channel {
2154 __le32 duration; /* measurement duration in extended beacon
2155 * format */
2156 u8 channel; /* channel to measure */
2157 u8 type; /* see enum iwl_measure_type */
2158 __le16 reserved;
2159} __packed;
2160
2161/*
2162 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
2163 */
2164struct iwl_spectrum_cmd {
2165 __le16 len; /* number of bytes starting from token */
2166 u8 token; /* token id */
2167 u8 id; /* measurement id -- 0 or 1 */
2168 u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */
2169 u8 periodic; /* 1 = periodic */
2170 __le16 path_loss_timeout;
2171 __le32 start_time; /* start time in extended beacon format */
2172 __le32 reserved2;
2173 __le32 flags; /* rxon flags */
2174 __le32 filter_flags; /* rxon filter flags */
2175 __le16 channel_count; /* minimum 1, maximum 10 */
2176 __le16 reserved3;
2177 struct iwl_measure_channel channels[10];
2178} __packed;
2179
2180/*
2181 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
2182 */
2183struct iwl_spectrum_resp {
2184 u8 token;
2185 u8 id; /* id of the prior command replaced, or 0xff */
2186 __le16 status; /* 0 - command will be handled
2187 * 1 - cannot handle (conflicts with another
2188 * measurement) */
2189} __packed;
2190
2191enum iwl_measurement_state {
2192 IWL_MEASUREMENT_START = 0,
2193 IWL_MEASUREMENT_STOP = 1,
2194};
2195
2196enum iwl_measurement_status {
2197 IWL_MEASUREMENT_OK = 0,
2198 IWL_MEASUREMENT_CONCURRENT = 1,
2199 IWL_MEASUREMENT_CSA_CONFLICT = 2,
2200 IWL_MEASUREMENT_TGH_CONFLICT = 3,
2201 /* 4-5 reserved */
2202 IWL_MEASUREMENT_STOPPED = 6,
2203 IWL_MEASUREMENT_TIMEOUT = 7,
2204 IWL_MEASUREMENT_PERIODIC_FAILED = 8,
2205};
2206
2207#define NUM_ELEMENTS_IN_HISTOGRAM 8
2208
2209struct iwl_measurement_histogram {
2210 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2211 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2212} __packed;
2213
2214/* clear channel availability counters */
2215struct iwl_measurement_cca_counters {
2216 __le32 ofdm;
2217 __le32 cck;
2218} __packed;
2219
2220enum iwl_measure_type {
2221 IWL_MEASURE_BASIC = (1 << 0),
2222 IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
2223 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
2224 IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
2225 IWL_MEASURE_FRAME = (1 << 4),
2226 /* bits 5:6 are reserved */
2227 IWL_MEASURE_IDLE = (1 << 7),
2228};
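
/*
 * Illustrative sketch: a single-channel basic measurement request.
 * Per the field comments above, len counts everything from token onward;
 * start_time and the RXON flags are left at 0 to keep the example small.
 * The helper name is made up, and the len computation is an assumption
 * (it covers the full fixed-size structure).
 */
static inline void example_basic_measurement(struct iwl_spectrum_cmd *cmd,
                                             u8 token, u8 channel,
                                             __le32 duration)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->token = token;
        cmd->channel_count = cpu_to_le16(1);
        cmd->filter_flags = MEASUREMENT_FILTER_FLAG;
        cmd->channels[0].channel = channel;
        cmd->channels[0].type = IWL_MEASURE_BASIC;
        cmd->channels[0].duration = duration;
        cmd->len = cpu_to_le16(sizeof(*cmd) -
                               offsetof(struct iwl_spectrum_cmd, token));
}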
2229
2230/*
2231 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
2232 */
2233struct iwl_spectrum_notification {
2234 u8 id; /* measurement id -- 0 or 1 */
2235 u8 token;
2236 u8 channel_index; /* index in measurement channel list */
2237 u8 state; /* 0 - start, 1 - stop */
2238 __le32 start_time; /* lower 32-bits of TSF */
2239 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
2240 u8 channel;
2241 u8 type; /* see enum iwl_measurement_type */
2242 u8 reserved1;
2243 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
2244 * valid if applicable for measurement type requested. */
2245 __le32 cca_ofdm; /* cca fraction time in 40 MHz clock periods */
2246 __le32 cca_cck; /* cca fraction time in 44 MHz clock periods */
2247 __le32 cca_time; /* channel load time in usecs */
2248 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
2249 * unidentified */
2250 u8 reserved2[3];
2251 struct iwl_measurement_histogram histogram;
2252 __le32 stop_time; /* lower 32-bits of TSF */
2253 __le32 status; /* see iwl_measurement_status */
2254} __packed;
2255
2256/******************************************************************************
2257 * (7)
2258 * Power Management Commands, Responses, Notifications:
2259 *
2260 *****************************************************************************/
2261
2262/**
2263 * struct iwl_powertable_cmd - Power Table Command
2264 * @flags: See below:
2265 *
2266 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
2267 *
2268 * PM allow:
2269 * bit 0 - '0' Driver does not allow power management
2270 * '1' Driver allows PM (use rest of parameters)
2271 *
2272 * uCode sends sleep notifications:
2273 * bit 1 - '0' Don't send sleep notification
2274 * '1' Send sleep notification (SEND_PM_NOTIFICATION)
2275 *
2276 * Sleep over DTIM
2277 * bit 2 - '0' PM has to wake up every DTIM
2278 * '1' PM could sleep over DTIM until the listen interval.
2279 *
2280 * PCI power managed
2281 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2282 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2283 *
2284 * Fast PD
2285 * bit 4 - '1' Put radio to sleep when receiving frame for others
2286 *
2287 * Force sleep Modes
2288 * bit 31/30- '00' use both mac/xtal sleeps
2289 * '01' force Mac sleep
2290 * '10' force xtal sleep
2291 * '11' Illegal set
2292 *
2293 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
2294 * uCode assumes sleep over DTIM is allowed and we don't need to wake up
2295 * for every DTIM.
2296 */
2297#define IWL_POWER_VEC_SIZE 5
2298
2299#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2300#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2301
2302struct iwl3945_powertable_cmd {
2303 __le16 flags;
2304 u8 reserved[2];
2305 __le32 rx_data_timeout;
2306 __le32 tx_data_timeout;
2307 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2308} __packed;
2309
2310struct iwl_powertable_cmd {
2311 __le16 flags;
2312 u8 keep_alive_seconds; /* 3945 reserved */
2313 u8 debug_flags; /* 3945 reserved */
2314 __le32 rx_data_timeout;
2315 __le32 tx_data_timeout;
2316 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2317 __le32 keep_alive_beacons;
2318} __packed;
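
/*
 * Illustrative sketch: building the flags word described above -- allow
 * the device to sleep, and report whether PCI power management is active
 * so uCode can pick the matching behaviour.  The helper name is made up
 * for this example; pci_pm would come from the PCI link control register.
 */
static inline __le16 example_powertable_flags(bool allow_sleep, bool pci_pm)
{
        __le16 flags = 0;

        if (allow_sleep)
                flags |= IWL_POWER_DRIVER_ALLOW_SLEEP_MSK;
        if (pci_pm)
                flags |= IWL_POWER_PCI_PM_MSK;

        return flags;
}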
2319
2320/*
2321 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
2322 * all devices identical.
2323 */
2324struct iwl_sleep_notification {
2325 u8 pm_sleep_mode;
2326 u8 pm_wakeup_src;
2327 __le16 reserved;
2328 __le32 sleep_time;
2329 __le32 tsf_low;
2330 __le32 bcon_timer;
2331} __packed;
2332
2333/* Sleep states. all devices identical. */
2334enum {
2335 IWL_PM_NO_SLEEP = 0,
2336 IWL_PM_SLP_MAC = 1,
2337 IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
2338 IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
2339 IWL_PM_SLP_PHY = 4,
2340 IWL_PM_SLP_REPENT = 5,
2341 IWL_PM_WAKEUP_BY_TIMER = 6,
2342 IWL_PM_WAKEUP_BY_DRIVER = 7,
2343 IWL_PM_WAKEUP_BY_RFKILL = 8,
2344 /* 3 reserved */
2345 IWL_PM_NUM_OF_MODES = 12,
2346};
2347
2348/*
2349 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
2350 */
2351struct iwl_card_state_notif {
2352 __le32 flags;
2353} __packed;
2354
2355#define HW_CARD_DISABLED 0x01
2356#define SW_CARD_DISABLED 0x02
2357#define CT_CARD_DISABLED 0x04
2358#define RXON_CARD_DISABLED 0x10
2359
2360struct iwl_ct_kill_config {
2361 __le32 reserved;
2362 __le32 critical_temperature_M;
2363 __le32 critical_temperature_R;
2364} __packed;
2365
2366/******************************************************************************
2367 * (8)
2368 * Scan Commands, Responses, Notifications:
2369 *
2370 *****************************************************************************/
2371
2372#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
2373#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2374
2375/**
2376 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
2377 *
2378 * One for each channel in the scan list.
2379 * Each channel can independently select:
2380 * 1) SSID for directed active scans
2381 * 2) Txpower setting (for rate specified within Tx command)
2382 * 3) How long to stay on-channel (behavior may be modified by quiet_time,
2383 * quiet_plcp_th, good_CRC_th)
2384 *
2385 * To avoid uCode errors, make sure the following are true (see comments
2386 * under struct iwl_scan_cmd about max_out_time and quiet_time):
2387 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2388 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2389 * 2) quiet_time <= active_dwell
2390 * 3) If restricting off-channel time (i.e. max_out_time !=0):
2391 * passive_dwell < max_out_time
2392 * active_dwell < max_out_time
2393 */
2394struct iwl3945_scan_channel {
2395 /*
2396 * type is defined as:
2397 * 0:0 1 = active, 0 = passive
2398 * 1:4 SSID direct bit map; if a bit is set, then corresponding
2399 * SSID IE is transmitted in probe request.
2400 * 5:7 reserved
2401 */
2402 u8 type;
2403 u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
2404 struct iwl3945_tx_power tpc;
2405 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2406 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2407} __packed;
2408
2409/* set number of direct probes u8 type */
2410#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2411
2412struct iwl_scan_channel {
2413 /*
2414 * type is defined as:
2415 * 0:0 1 = active, 0 = passive
2416 * 1:20 SSID direct bit map; if a bit is set, then corresponding
2417 * SSID IE is transmitted in probe request.
2418 * 21:31 reserved
2419 */
2420 __le32 type;
2421 __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
2422 u8 tx_gain; /* gain for analog radio */
2423 u8 dsp_atten; /* gain for DSP */
2424 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2425 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2426} __packed;
2427
2428/* set number of direct probes __le32 type */
2429#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
2430
2431/**
2432 * struct iwl_ssid_ie - directed scan network information element
2433 *
2434 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
2435 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
2436 * each channel may select different ssids from among the 20 (4) entries.
2437 * SSID IEs get transmitted in reverse order of entry.
2438 */
2439struct iwl_ssid_ie {
2440 u8 id;
2441 u8 len;
2442 u8 ssid[32];
2443} __packed;
2444
2445#define PROBE_OPTION_MAX_3945 4
2446#define PROBE_OPTION_MAX 20
2447#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2448#define IWL_GOOD_CRC_TH_DISABLED 0
2449#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2450#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2451#define IWL_MAX_SCAN_SIZE 1024
2452#define IWL_MAX_CMD_SIZE 4096
2453
2454/*
2455 * REPLY_SCAN_CMD = 0x80 (command)
2456 *
2457 * The hardware scan command is very powerful; the driver can set it up to
2458 * maintain (relatively) normal network traffic while doing a scan in the
2459 * background. The max_out_time and suspend_time control the ratio of how
2460 * long the device stays on an associated network channel ("service channel")
2461 * vs. how long it's away from the service channel, i.e. tuned to other channels
2462 * for scanning.
2463 *
2464 * max_out_time is the max time off-channel (in usec), and suspend_time
2465 * is how long (in "extended beacon" format) that the scan is "suspended"
2466 * after returning to the service channel. That is, suspend_time is the
2467 * time that we stay on the service channel, doing normal work, between
2468 * scan segments. The driver may set these parameters differently to support
2469 * scanning when associated vs. not associated, and light vs. heavy traffic
2470 * loads when associated.
2471 *
2472 * After receiving this command, the device's scan engine does the following:
2473 *
2474 * 1) Sends SCAN_START notification to driver
2475 * 2) Checks to see if it has time to do scan for one channel
2476 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
2477 * to tell AP that we're going off-channel
2478 * 4) Tunes to first channel in scan list, does active or passive scan
2479 * 5) Sends SCAN_RESULT notification to driver
2480 * 6) Checks to see if it has time to do scan on *next* channel in list
2481 * 7) Repeats 4-6 until it no longer has time to scan the next channel
2482 * before max_out_time expires
2483 * 8) Returns to service channel
2484 * 9) Sends NULL packet with PS=0 to tell AP that we're back
2485 * 10) Stays on service channel until suspend_time expires
2486 * 11) Repeats entire process 2-10 until list is complete
2487 * 12) Sends SCAN_COMPLETE notification
2488 *
2489 * For fast, efficient scans, the scan command also has support for staying on
2490 * a channel for just a short time, if doing active scanning and getting no
2491 * responses to the transmitted probe request. This time is controlled by
2492 * quiet_time, and the number of received packets below which a channel is
2493 * considered "quiet" is controlled by quiet_plcp_threshold.
2494 *
2495 * For active scanning on channels that have regulatory restrictions against
2496 * blindly transmitting, the scan can listen before transmitting, to make sure
2497 * that there is already legitimate activity on the channel. If enough
2498 * packets are cleanly received on the channel (controlled by good_CRC_th,
2499 * typical value 1), the scan engine starts transmitting probe requests.
2500 *
2501 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2502 *
2503 * To avoid uCode errors, see timing restrictions described under
2504 * struct iwl_scan_channel.
2505 */
2506
2507struct iwl3945_scan_cmd {
2508 __le16 len;
2509 u8 reserved0;
2510 u8 channel_count; /* # channels in channel list */
2511 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2512 * (only for active scan) */
2513 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2514 __le16 good_CRC_th; /* passive -> active promotion threshold */
2515 __le16 reserved1;
2516 __le32 max_out_time; /* max usec to be away from associated (service)
2517 * channel */
2518 __le32 suspend_time; /* pause scan this long (in "extended beacon
2519 * format") when returning to service channel:
2520 * 3945; 31:24 # beacons, 19:0 additional usec,
2521 * 4965; 31:22 # beacons, 21:0 additional usec.
2522 */
2523 __le32 flags; /* RXON_FLG_* */
2524 __le32 filter_flags; /* RXON_FILTER_* */
2525
2526 /* For active scans (set to all-0s for passive scans).
2527 * Does not include payload. Must specify Tx rate; no rate scaling. */
2528 struct iwl3945_tx_cmd tx_cmd;
2529
2530 /* For directed active scans (set to all-0s otherwise) */
2531 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
2532
2533 /*
2534 * Probe request frame, followed by channel list.
2535 *
2536 * Size of probe request frame is specified by byte count in tx_cmd.
2537 * Channel list follows immediately after probe request frame.
2538 * Number of channels in list is specified by channel_count.
2539 * Each channel in list is of type:
2540 *
2541 * struct iwl3945_scan_channel channels[0];
2542 *
2543 * NOTE: Only one band of channels can be scanned per pass. You
2544 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2545 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2546 * before requesting another scan.
2547 */
2548 u8 data[0];
2549} __packed;
2550
2551struct iwl_scan_cmd {
2552 __le16 len;
2553 u8 reserved0;
2554 u8 channel_count; /* # channels in channel list */
2555 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2556 * (only for active scan) */
2557 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2558 __le16 good_CRC_th; /* passive -> active promotion threshold */
2559 __le16 rx_chain; /* RXON_RX_CHAIN_* */
2560 __le32 max_out_time; /* max usec to be away from associated (service)
2561 * channel */
2562 __le32 suspend_time; /* pause scan this long (in "extended beacon
2563 * format") when returning to service chnl:
2564 * 3945; 31:24 # beacons, 19:0 additional usec,
2565 * 4965; 31:22 # beacons, 21:0 additional usec.
2566 */
2567 __le32 flags; /* RXON_FLG_* */
2568 __le32 filter_flags; /* RXON_FILTER_* */
2569
2570 /* For active scans (set to all-0s for passive scans).
2571 * Does not include payload. Must specify Tx rate; no rate scaling. */
2572 struct iwl_tx_cmd tx_cmd;
2573
2574 /* For directed active scans (set to all-0s otherwise) */
2575 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
2576
2577 /*
2578 * Probe request frame, followed by channel list.
2579 *
2580 * Size of probe request frame is specified by byte count in tx_cmd.
2581 * Channel list follows immediately after probe request frame.
2582 * Number of channels in list is specified by channel_count.
2583 * Each channel in list is of type:
2584 *
2585 * struct iwl_scan_channel channels[0];
2586 *
2587 * NOTE: Only one band of channels can be scanned per pass. You
2588 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2589 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2590 * before requesting another scan.
2591 */
2592 u8 data[0];
2593} __packed;
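
/*
 * Illustrative sketch of the variable-length layout described above: the
 * probe request frame goes at scan->data, the channel list follows it,
 * and scan->len covers the whole command.  The caller is assumed to have
 * allocated a buffer of up to IWL_MAX_SCAN_SIZE; probe_req/probe_len and
 * the helper name are made up for this example.
 */
static inline struct iwl_scan_channel *
example_scan_layout(struct iwl_scan_cmd *scan, const u8 *probe_req,
                    u16 probe_len, u8 channel_count)
{
        struct iwl_scan_channel *channels;

        /* probe request frame sits immediately after the fixed command */
        memcpy(scan->data, probe_req, probe_len);
        scan->tx_cmd.len = cpu_to_le16(probe_len);

        /* channel list follows the probe request frame */
        channels = (struct iwl_scan_channel *)(scan->data + probe_len);
        scan->channel_count = channel_count;

        /* total length: fixed part + probe request + channel entries */
        scan->len = cpu_to_le16(sizeof(*scan) + probe_len +
                                channel_count * sizeof(*channels));

        return channels;        /* caller fills each iwl_scan_channel */
}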
2594
2595/* A scan abort is reported via the scan complete notification with abort status. */
2596#define CAN_ABORT_STATUS cpu_to_le32(0x1)
2597/* complete notification statuses */
2598#define ABORT_STATUS 0x2
2599
2600/*
2601 * REPLY_SCAN_CMD = 0x80 (response)
2602 */
2603struct iwl_scanreq_notification {
2604 __le32 status; /* 1: okay, 2: cannot fulfill request */
2605} __packed;
2606
2607/*
2608 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
2609 */
2610struct iwl_scanstart_notification {
2611 __le32 tsf_low;
2612 __le32 tsf_high;
2613 __le32 beacon_timer;
2614 u8 channel;
2615 u8 band;
2616 u8 reserved[2];
2617 __le32 status;
2618} __packed;
2619
2620#define SCAN_OWNER_STATUS 0x1
2621#define MEASURE_OWNER_STATUS 0x2
2622
2623#define IWL_PROBE_STATUS_OK 0
2624#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
2625/* error statuses combined with TX_FAILED */
2626#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
2627#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
2628
2629#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
2630/*
2631 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
2632 */
2633struct iwl_scanresults_notification {
2634 u8 channel;
2635 u8 band;
2636 u8 probe_status;
2637 u8 num_probe_not_sent; /* not enough time to send */
2638 __le32 tsf_low;
2639 __le32 tsf_high;
2640 __le32 statistics[NUMBER_OF_STATISTICS];
2641} __packed;
2642
2643/*
2644 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
2645 */
2646struct iwl_scancomplete_notification {
2647 u8 scanned_channels;
2648 u8 status;
2649 u8 last_channel;
2650 __le32 tsf_low;
2651 __le32 tsf_high;
2652} __packed;
2653
2654
2655/******************************************************************************
2656 * (9)
2657 * IBSS/AP Commands and Notifications:
2658 *
2659 *****************************************************************************/
2660
2661enum iwl_ibss_manager {
2662 IWL_NOT_IBSS_MANAGER = 0,
2663 IWL_IBSS_MANAGER = 1,
2664};
2665
2666/*
2667 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
2668 */
2669
2670struct iwl3945_beacon_notif {
2671 struct iwl3945_tx_resp beacon_notify_hdr;
2672 __le32 low_tsf;
2673 __le32 high_tsf;
2674 __le32 ibss_mgr_status;
2675} __packed;
2676
2677struct iwl4965_beacon_notif {
2678 struct iwl4965_tx_resp beacon_notify_hdr;
2679 __le32 low_tsf;
2680 __le32 high_tsf;
2681 __le32 ibss_mgr_status;
2682} __packed;
2683
2684/*
2685 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2686 */
2687
2688struct iwl3945_tx_beacon_cmd {
2689 struct iwl3945_tx_cmd tx;
2690 __le16 tim_idx;
2691 u8 tim_size;
2692 u8 reserved1;
2693 struct ieee80211_hdr frame[0]; /* beacon frame */
2694} __packed;
2695
2696struct iwl_tx_beacon_cmd {
2697 struct iwl_tx_cmd tx;
2698 __le16 tim_idx;
2699 u8 tim_size;
2700 u8 reserved1;
2701 struct ieee80211_hdr frame[0]; /* beacon frame */
2702} __packed;
2703
2704/******************************************************************************
2705 * (10)
2706 * Statistics Commands and Notifications:
2707 *
2708 *****************************************************************************/
2709
2710#define IWL_TEMP_CONVERT 260
2711
2712#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
2713#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
2714#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
2715
2716/* Used for passing to driver number of successes and failures per rate */
2717struct rate_histogram {
2718 union {
2719 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2720 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2721 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2722 } success;
2723 union {
2724 __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
2725 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2726 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2727 } failed;
2728} __packed;
2729
2730/* statistics command response */
2731
2732struct iwl39_statistics_rx_phy {
2733 __le32 ina_cnt;
2734 __le32 fina_cnt;
2735 __le32 plcp_err;
2736 __le32 crc32_err;
2737 __le32 overrun_err;
2738 __le32 early_overrun_err;
2739 __le32 crc32_good;
2740 __le32 false_alarm_cnt;
2741 __le32 fina_sync_err_cnt;
2742 __le32 sfd_timeout;
2743 __le32 fina_timeout;
2744 __le32 unresponded_rts;
2745 __le32 rxe_frame_limit_overrun;
2746 __le32 sent_ack_cnt;
2747 __le32 sent_cts_cnt;
2748} __packed;
2749
2750struct iwl39_statistics_rx_non_phy {
2751 __le32 bogus_cts; /* CTS received when not expecting CTS */
2752 __le32 bogus_ack; /* ACK received when not expecting ACK */
2753 __le32 non_bssid_frames; /* number of frames with BSSID that
2754 * doesn't belong to the STA BSSID */
2755 __le32 filtered_frames; /* count frames that were dumped in the
2756 * filtering process */
2757 __le32 non_channel_beacons; /* beacons with our bss id but not on
2758 * our serving channel */
2759} __packed;
2760
2761struct iwl39_statistics_rx {
2762 struct iwl39_statistics_rx_phy ofdm;
2763 struct iwl39_statistics_rx_phy cck;
2764 struct iwl39_statistics_rx_non_phy general;
2765} __packed;
2766
2767struct iwl39_statistics_tx {
2768 __le32 preamble_cnt;
2769 __le32 rx_detected_cnt;
2770 __le32 bt_prio_defer_cnt;
2771 __le32 bt_prio_kill_cnt;
2772 __le32 few_bytes_cnt;
2773 __le32 cts_timeout;
2774 __le32 ack_timeout;
2775 __le32 expected_ack_cnt;
2776 __le32 actual_ack_cnt;
2777} __packed;
2778
2779struct statistics_dbg {
2780 __le32 burst_check;
2781 __le32 burst_count;
2782 __le32 wait_for_silence_timeout_cnt;
2783 __le32 reserved[3];
2784} __packed;
2785
2786struct iwl39_statistics_div {
2787 __le32 tx_on_a;
2788 __le32 tx_on_b;
2789 __le32 exec_time;
2790 __le32 probe_time;
2791} __packed;
2792
2793struct iwl39_statistics_general {
2794 __le32 temperature;
2795 struct statistics_dbg dbg;
2796 __le32 sleep_time;
2797 __le32 slots_out;
2798 __le32 slots_idle;
2799 __le32 ttl_timestamp;
2800 struct iwl39_statistics_div div;
2801} __packed;
2802
2803struct statistics_rx_phy {
2804 __le32 ina_cnt;
2805 __le32 fina_cnt;
2806 __le32 plcp_err;
2807 __le32 crc32_err;
2808 __le32 overrun_err;
2809 __le32 early_overrun_err;
2810 __le32 crc32_good;
2811 __le32 false_alarm_cnt;
2812 __le32 fina_sync_err_cnt;
2813 __le32 sfd_timeout;
2814 __le32 fina_timeout;
2815 __le32 unresponded_rts;
2816 __le32 rxe_frame_limit_overrun;
2817 __le32 sent_ack_cnt;
2818 __le32 sent_cts_cnt;
2819 __le32 sent_ba_rsp_cnt;
2820 __le32 dsp_self_kill;
2821 __le32 mh_format_err;
2822 __le32 re_acq_main_rssi_sum;
2823 __le32 reserved3;
2824} __packed;
2825
2826struct statistics_rx_ht_phy {
2827 __le32 plcp_err;
2828 __le32 overrun_err;
2829 __le32 early_overrun_err;
2830 __le32 crc32_good;
2831 __le32 crc32_err;
2832 __le32 mh_format_err;
2833 __le32 agg_crc32_good;
2834 __le32 agg_mpdu_cnt;
2835 __le32 agg_cnt;
2836 __le32 unsupport_mcs;
2837} __packed;
2838
2839#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
2840
2841struct statistics_rx_non_phy {
2842 __le32 bogus_cts; /* CTS received when not expecting CTS */
2843 __le32 bogus_ack; /* ACK received when not expecting ACK */
2844 __le32 non_bssid_frames; /* number of frames with BSSID that
2845 * doesn't belong to the STA BSSID */
2846 __le32 filtered_frames; /* count frames that were dumped in the
2847 * filtering process */
2848 __le32 non_channel_beacons; /* beacons with our bss id but not on
2849 * our serving channel */
2850 __le32 channel_beacons; /* beacons with our bss id and in our
2851 * serving channel */
2852 __le32 num_missed_bcon; /* number of missed beacons */
2853 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
2854 * ADC was in saturation */
2855 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
2856 * for INA */
2857 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
2858 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
2859 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
2860 __le32 interference_data_flag; /* flag for interference data
2861 * availability. 1 when data is
2862 * available. */
2863 __le32 channel_load; /* counts RX Enable time in uSec */
2864 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
2865 * and CCK) counter */
2866 __le32 beacon_rssi_a;
2867 __le32 beacon_rssi_b;
2868 __le32 beacon_rssi_c;
2869 __le32 beacon_energy_a;
2870 __le32 beacon_energy_b;
2871 __le32 beacon_energy_c;
2872} __packed;
2873
2874struct statistics_rx {
2875 struct statistics_rx_phy ofdm;
2876 struct statistics_rx_phy cck;
2877 struct statistics_rx_non_phy general;
2878 struct statistics_rx_ht_phy ofdm_ht;
2879} __packed;
2880
2881/**
2882 * struct statistics_tx_power - current tx power
2883 *
2884 * @ant_a: current tx power on chain a in 1/2 dB step
2885 * @ant_b: current tx power on chain b in 1/2 dB step
2886 * @ant_c: current tx power on chain c in 1/2 dB step
2887 */
2888struct statistics_tx_power {
2889 u8 ant_a;
2890 u8 ant_b;
2891 u8 ant_c;
2892 u8 reserved;
2893} __packed;
2894
2895struct statistics_tx_non_phy_agg {
2896 __le32 ba_timeout;
2897 __le32 ba_reschedule_frames;
2898 __le32 scd_query_agg_frame_cnt;
2899 __le32 scd_query_no_agg;
2900 __le32 scd_query_agg;
2901 __le32 scd_query_mismatch;
2902 __le32 frame_not_ready;
2903 __le32 underrun;
2904 __le32 bt_prio_kill;
2905 __le32 rx_ba_rsp_cnt;
2906} __packed;
2907
2908struct statistics_tx {
2909 __le32 preamble_cnt;
2910 __le32 rx_detected_cnt;
2911 __le32 bt_prio_defer_cnt;
2912 __le32 bt_prio_kill_cnt;
2913 __le32 few_bytes_cnt;
2914 __le32 cts_timeout;
2915 __le32 ack_timeout;
2916 __le32 expected_ack_cnt;
2917 __le32 actual_ack_cnt;
2918 __le32 dump_msdu_cnt;
2919 __le32 burst_abort_next_frame_mismatch_cnt;
2920 __le32 burst_abort_missing_next_frame_cnt;
2921 __le32 cts_timeout_collision;
2922 __le32 ack_or_ba_timeout_collision;
2923 struct statistics_tx_non_phy_agg agg;
2924
2925 __le32 reserved1;
2926} __packed;
2927
2928
2929struct statistics_div {
2930 __le32 tx_on_a;
2931 __le32 tx_on_b;
2932 __le32 exec_time;
2933 __le32 probe_time;
2934 __le32 reserved1;
2935 __le32 reserved2;
2936} __packed;
2937
2938struct statistics_general_common {
2939 __le32 temperature; /* radio temperature */
2940 struct statistics_dbg dbg;
2941 __le32 sleep_time;
2942 __le32 slots_out;
2943 __le32 slots_idle;
2944 __le32 ttl_timestamp;
2945 struct statistics_div div;
2946 __le32 rx_enable_counter;
2947 /*
2948 * num_of_sos_states:
2949 * count the number of times we have to re-tune
2950 * in order to get out of bad PHY status
2951 */
2952 __le32 num_of_sos_states;
2953} __packed;
2954
2955struct statistics_general {
2956 struct statistics_general_common common;
2957 __le32 reserved2;
2958 __le32 reserved3;
2959} __packed;
2960
2961#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
2962#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
2963#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
2964
2965/*
2966 * REPLY_STATISTICS_CMD = 0x9c,
2967 * all devices identical.
2968 *
2969 * This command triggers an immediate response containing uCode statistics.
2970 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
2971 *
2972 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2973 * internal copy of the statistics (counters) after issuing the response.
2974 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
2975 *
2976 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2977 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
2978 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
2979 */
2980#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2981#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
2982struct iwl_statistics_cmd {
2983 __le32 configuration_flags; /* IWL_STATS_CONF_* */
2984} __packed;
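As a rough illustration of how the configuration flags above are used (this sketch is not part of the original header), a driver could request a one-shot statistics dump and clear uCode's counters as shown below; it relies on the REPLY_STATISTICS_CMD id declared earlier in this header and on the iwl_legacy_send_cmd_pdu() helper from iwl-core.c.

static int example_request_and_clear_statistics(struct iwl_priv *priv)
{
	struct iwl_statistics_cmd cmd = {
		/* ask uCode to wipe its internal counters after it replies */
		.configuration_flags = IWL_STATS_CONF_CLEAR_STATS,
	};

	/* the response arrives in the STATISTICS_NOTIFICATION (0x9d) layout */
	return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
				       sizeof(cmd), &cmd);
}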
2985
2986/*
2987 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
2988 *
2989 * By default, uCode issues this notification after receiving a beacon
2990 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2991 * REPLY_STATISTICS_CMD 0x9c, above.
2992 *
2993 * Statistics counters continue to increment beacon after beacon, but are
2994 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
2995 * 0x9c with CLEAR_STATS bit set (see above).
2996 *
2997 * uCode also issues this notification during scans. uCode clears statistics
2998 * appropriately so that each notification contains statistics for only the
2999 * one channel that has just been scanned.
3000 */
3001#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
3002#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
3003
3004struct iwl3945_notif_statistics {
3005 __le32 flag;
3006 struct iwl39_statistics_rx rx;
3007 struct iwl39_statistics_tx tx;
3008 struct iwl39_statistics_general general;
3009} __packed;
3010
3011struct iwl_notif_statistics {
3012 __le32 flag;
3013 struct statistics_rx rx;
3014 struct statistics_tx tx;
3015 struct statistics_general general;
3016} __packed;
3017
3018/*
3019 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
3020 *
3021 * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
3022 * missed beacons, regardless of how many were missed. Inside the
3023 * notification the driver can find the complete beacon accounting: the
3024 * total number of missed beacons, the number of consecutive missed
3025 * beacons, the number of beacons received, and the number of beacons
3026 * expected to be received.
3027 *
3028 * If uCode detects consecutive_missed_beacons > 5, it resets the radio to
3029 * bring the radio/PHY back to a working state; this is unrelated to when
3030 * the driver performs sensitivity calibration.
3031 *
3032 * The driver should set its own missed_beacon_threshold to decide when to
3033 * perform sensitivity calibration based on the number of consecutive
3034 * missed beacons, in order to improve overall performance, especially in noisy environments.
3035 *
3036 */
3037
3038#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
3039#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
3040#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
3041
3042struct iwl_missed_beacon_notif {
3043 __le32 consecutive_missed_beacons;
3044 __le32 total_missed_becons;
3045 __le32 num_expected_beacons;
3046 __le32 num_recvd_beacons;
3047} __packed;
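A minimal sketch (not part of the original header) of the driver-side check described above; the threshold is whatever value the driver picked between the MIN and MAX defines.

static bool example_needs_sensitivity_calib(const struct iwl_missed_beacon_notif *notif,
					    u32 missed_beacon_threshold)
{
	/* uCode already resets the radio above 5 consecutive misses; this
	 * driver-side threshold only decides when to redo sensitivity
	 * calibration */
	return le32_to_cpu(notif->consecutive_missed_beacons) >
	       missed_beacon_threshold;
}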
3048
3049
3050/******************************************************************************
3051 * (11)
3052 * Rx Calibration Commands:
3053 *
3054 * With the uCode used for open source drivers, most Tx calibration (except
3055 * for Tx Power) and most Rx calibration is done by uCode during the
3056 * "initialize" phase of uCode boot. Driver must calibrate only:
3057 *
3058 * 1) Tx power (depends on temperature), described elsewhere
3059 * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
3060 * 3) Receiver sensitivity (to optimize signal detection)
3061 *
3062 *****************************************************************************/
3063
3064/**
3065 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
3066 *
3067 * This command sets up the Rx signal detector for a sensitivity level that
3068 * is high enough to lock onto all signals within the associated network,
3069 * but low enough to ignore signals that are below a certain threshold, so as
3070 * not to have too many "false alarms". False alarms are signals that the
3071 * Rx DSP tries to lock onto, but then discards after determining that they
3072 * are noise.
3073 *
3074 * The optimum number of false alarms is between 5 and 50 per 200 TUs
3075 * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
3076 * time listening, not transmitting). Driver must adjust sensitivity so that
3077 * the ratio of actual false alarms to actual Rx time falls within this range.
3078 *
3079 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
3080 * received beacon. These provide information to the driver to analyze the
3081 * sensitivity. Don't analyze statistics that come in from scanning, or any
3082 * other non-associated-network source. Pertinent statistics include:
3083 *
3084 * From "general" statistics (struct statistics_rx_non_phy):
3085 *
3086 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
3087 * Measure of energy of desired signal. Used for establishing a level
3088 * below which the device does not detect signals.
3089 *
3090 * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
3091 * Measure of background noise in silent period after beacon.
3092 *
3093 * channel_load
3094 * uSecs of actual Rx time during beacon period (varies according to
3095 * how much time was spent transmitting).
3096 *
3097 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
3098 *
3099 * false_alarm_cnt
3100 * Signal locks abandoned early (before phy-level header).
3101 *
3102 * plcp_err
3103 * Signal locks abandoned late (during phy-level header).
3104 *
3105 * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
3106 * beacon to beacon, i.e. each value is an accumulation of all errors
3107 * before and including the latest beacon. Values will wrap around to 0
3108 * after counting up to 2^32 - 1. Driver must differentiate vs.
3109 * previous beacon's values to determine # false alarms in the current
3110 * beacon period.
3111 *
3112 * Total number of false alarms = false_alarms + plcp_errs
3113 *
3114 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
3115 * (notice that the start points for OFDM are at or close to settings for
3116 * maximum sensitivity):
3117 *
3118 * START / MIN / MAX
3119 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
3120 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
3121 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
3122 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
3123 *
3124 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
3125 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
3126 * by *adding* 1 to all 4 of the table entries above, up to the max for
3127 * each entry. Conversely, if false alarm rate is too low (less than 5
3128 * for each 204.8 msecs listening), *subtract* 1 from each entry to
3129 * increase sensitivity.
3130 *
3131 * For CCK sensitivity, keep track of the following:
3132 *
3133 * 1). 20-beacon history of maximum background noise, indicated by
3134 * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
3135 * 3 receivers. For any given beacon, the "silence reference" is
3136 * the maximum of last 60 samples (20 beacons * 3 receivers).
3137 *
3138 * 2). 10-beacon history of strongest signal level, as indicated
3139 * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
3140 * i.e. the strength of the signal through the best receiver at the
3141 * moment. These measurements are "upside down", with lower values
3142 * for stronger signals, so max energy will be *minimum* value.
3143 *
3144 * Then for any given beacon, the driver must determine the *weakest*
3145 * of the strongest signals; this is the minimum level that needs to be
3146 * successfully detected, when using the best receiver at the moment.
3147 * "Max cck energy" is the maximum (higher value means lower energy!)
3148 * of the last 10 minima. Once this is determined, driver must add
3149 * a little margin by adding "6" to it.
3150 *
3151 * 3). Number of consecutive beacon periods with too few false alarms.
3152 * Reset this to 0 at the first beacon period that falls within the
3153 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
3154 *
3155 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
3156 * (notice that the start points for CCK are at maximum sensitivity):
3157 *
3158 * START / MIN / MAX
3159 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
3160 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
3161 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
3162 *
3163 * If actual rate of CCK false alarms (+ plcp_errors) is too high
3164 * (greater than 50 for each 204.8 msecs listening), method for reducing
3165 * sensitivity is:
3166 *
3167 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3168 * up to max 400.
3169 *
3170 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
3171 * sensitivity has been reduced a significant amount; bring it up to
3172 * a moderate 161. Otherwise, *add* 3, up to max 200.
3173 *
3174 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
3175 * sensitivity has been reduced only a moderate or small amount;
3176 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
3177 * down to min 0. Otherwise (if gain has been significantly reduced),
3178 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
3179 *
3180 * b) Save a snapshot of the "silence reference".
3181 *
3182 * If actual rate of CCK false alarms (+ plcp_errors) is too low
3183 * (less than 5 for each 204.8 msecs listening), method for increasing
3184 * sensitivity is used only if:
3185 *
3186 * 1a) Previous beacon did not have too many false alarms
3187 * 1b) AND difference between previous "silence reference" and current
3188 * "silence reference" (prev - current) is 2 or more,
3189 * OR 2) 100 or more consecutive beacon periods have had rate of
3190 * less than 5 false alarms per 204.8 milliseconds rx time.
3191 *
3192 * Method for increasing sensitivity:
3193 *
3194 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
3195 * down to min 125.
3196 *
3197 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
3198 * down to min 200.
3199 *
3200 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
3201 *
3202 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3203 * (between 5 and 50 for each 204.8 msecs listening):
3204 *
3205 * 1) Save a snapshot of the silence reference.
3206 *
3207 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3208 * give some extra margin to energy threshold by *subtracting* 8
3209 * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
3210 *
3211 * For all cases (too few, too many, good range), make sure that the CCK
3212 * detection threshold (energy) is below the energy level for robust
3213 * detection over the past 10 beacon periods, the "Max cck energy".
3214 * Lower values mean higher energy; this means making sure that the value
3215 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3216 *
3217 */
3218
3219/*
3220 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
3221 */
3222#define HD_TABLE_SIZE (11) /* number of entries */
3223#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
3224#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
3225#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
3226#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
3227#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
3228#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
3229#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
3230#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
3231#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
3232#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
3233#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
3234
3235/* Control field in struct iwl_sensitivity_cmd */
3236#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
3237#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
3238
3239/**
3240 * struct iwl_sensitivity_cmd
3241 * @control: (1) updates working table, (0) updates default table
3242 * @table: energy threshold values, use HD_* as index into table
3243 *
3244 * Always use "1" in "control" to update uCode's working table and DSP.
3245 */
3246struct iwl_sensitivity_cmd {
3247 __le16 control; /* always use "1" */
3248 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
3249} __packed;
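To make the OFDM branch of the procedure above concrete, here is a rough sketch (not part of the original header). It assumes hd[] is a host-order working copy of the table that the driver later converts to __le16 before sending SENSITIVITY_CMD, and that the per-beacon false-alarm count has been normalized to a 204.8 ms listen window as in the first helper.

/* false_alarm_cnt and plcp_err accumulate and wrap at 2^32, so the per-beacon
 * count is the unsigned difference from the previous notification */
static u32 example_norm_ofdm_false_alarms(u32 fa, u32 fa_prev,
					  u32 plcp, u32 plcp_prev,
					  u32 rx_time_usec)
{
	u32 delta = (fa - fa_prev) + (plcp - plcp_prev);
	u64 scaled;

	if (!rx_time_usec)
		return 0;
	/* scale to false alarms per 200 TU (204800 usec) of listen time;
	 * do_div() is the kernel's 64-by-32 division helper */
	scaled = (u64)delta * 204800;
	do_div(scaled, rx_time_usec);
	return (u32)scaled;
}

static void example_adjust_ofdm_sensitivity(u16 *hd, u32 norm_false_alarms)
{
	/* MIN/MAX bounds copied from the OFDM table in the comment above */
	static const struct { u8 idx; u16 min; u16 max; } ofdm[] = {
		{ HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX,      85, 120 },
		{ HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX, 170, 210 },
		{ HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX,     105, 140 },
		{ HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX, 220, 270 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ofdm); i++) {
		if (norm_false_alarms > 50 && hd[ofdm[i].idx] < ofdm[i].max)
			hd[ofdm[i].idx]++;	/* too many: reduce sensitivity */
		else if (norm_false_alarms < 5 && hd[ofdm[i].idx] > ofdm[i].min)
			hd[ofdm[i].idx]--;	/* too few: raise sensitivity */
	}
}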
3250
3251
3252/**
3253 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
3254 *
3255 * This command sets the relative gains of the 4965 device's 3 radio receiver chains.
3256 *
3257 * After the first association, driver should accumulate signal and noise
3258 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
3259 * beacons from the associated network (don't collect statistics that come
3260 * in from scanning, or any other non-network source).
3261 *
3262 * DISCONNECTED ANTENNA:
3263 *
3264 * Driver should determine which antennas are actually connected, by comparing
3265 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3266 * following values over 20 beacons, one accumulator for each of the chains
3267 * a/b/c, from struct statistics_rx_non_phy:
3268 *
3269 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3270 *
3271 * Find the strongest signal from among a/b/c. Compare the other two to the
3272 * strongest. If any signal is more than 15 dB (times 20, unless you
3273 * divide the accumulated values by 20) below the strongest, the driver
3274 * considers that antenna to be disconnected, and should not try to use that
3275 * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
3276 * driver should declare the stronger one as connected, and attempt to use it
3277 * (A and B are the only 2 Tx chains!).
3278 *
3279 *
3280 * RX BALANCE:
3281 *
3282 * Driver should balance the 3 receivers (but just the ones that are connected
3283 * to antennas, see above) for gain, by comparing the average signal levels
3284 * detected during the silence after each beacon (background noise).
3285 * Accumulate (add) the following values over 20 beacons, one accumulator for
3286 * each of the chains a/b/c, from struct statistics_rx_non_phy:
3287 *
3288 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3289 *
3290 * Find the weakest background noise level from among a/b/c. This Rx chain
3291 * will be the reference, with 0 gain adjustment. Attenuate other channels by
3292 * finding noise difference:
3293 *
3294 * (accum_noise[i] - accum_noise[reference]) / 30
3295 *
3296 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3297 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
3298 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3299 * and set bit 2 to indicate "reduce gain". The value for the reference
3300 * (weakest) chain should be "0".
3301 *
3302 * diff_gain_[abc] bit fields:
3303 * 2: (1) reduce gain, (0) increase gain
3304 * 1-0: amount of gain, units of 1.5 dB
3305 */
3306
3307/* Phy calibration command for series */
3308/* The default calibration table size if not specified by firmware */
3309#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3310enum {
3311 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3312 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3313};
3314
3315#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3316
3317struct iwl_calib_hdr {
3318 u8 op_code;
3319 u8 first_group;
3320 u8 groups_num;
3321 u8 data_valid;
3322} __packed;
3323
3324/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
3325struct iwl_calib_diff_gain_cmd {
3326 struct iwl_calib_hdr hdr;
3327 s8 diff_gain_a; /* see above */
3328 s8 diff_gain_b;
3329 s8 diff_gain_c;
3330 u8 reserved1;
3331} __packed;
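As a rough sketch of the RX-balance arithmetic described above (not part of the original header): accum_noise_chain and accum_noise_ref are the 20-beacon sums of beacon_silence_rssi for one chain and for the weakest-noise (reference) chain, and the return value uses the documented encoding (bit 2 = reduce gain, bits 1-0 = amount in 1.5 dB steps).

static s8 example_diff_gain(u32 accum_noise_chain, u32 accum_noise_ref)
{
	/* /30 turns 20 accumulated dB samples into 1.5 dB units */
	u32 delta = (accum_noise_chain - accum_noise_ref) / 30;

	if (delta > 3)
		delta = 3;			/* limit to 0..3 (0..4.5 dB) */
	if (!delta)
		return 0;			/* reference chain: no change */
	return (s8)(delta | (1 << 2));		/* bit 2 set: reduce gain */
}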
3332
3333/******************************************************************************
3334 * (12)
3335 * Miscellaneous Commands:
3336 *
3337 *****************************************************************************/
3338
3339/*
3340 * LEDs Command & Response
3341 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
3342 *
3343 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3344 * this command turns it on or off, or sets up a periodic blinking cycle.
3345 */
3346struct iwl_led_cmd {
3347 __le32 interval; /* "interval" in uSec */
3348 u8 id; /* 1: Activity, 2: Link, 3: Tech */
3349 u8 off; /* # intervals off while blinking;
3350 * "0", with >0 "on" value, turns LED on */
3351 u8 on; /* # intervals on while blinking;
3352 * "0", regardless of "off", turns LED off */
3353 u8 reserved;
3354} __packed;
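For illustration only (not part of the original header), a 50% duty-cycle blink of the Link LED could be described with the fields above; the interval value here is an arbitrary example, not a driver default.

static void example_fill_led_blink(struct iwl_led_cmd *led)
{
	led->interval = cpu_to_le32(1000);	/* length of one interval, uSec */
	led->id = 2;				/* 2: Link LED */
	led->on = 10;				/* 10 intervals on ... */
	led->off = 10;				/* ... then 10 intervals off */
	led->reserved = 0;
}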
3355
3356
3357/******************************************************************************
3358 * (13)
3359 * Union of all expected notifications/responses:
3360 *
3361 *****************************************************************************/
3362
3363struct iwl_rx_packet {
3364 /*
3365 * The first 4 bytes of the RX frame header contain both the RX frame
3366 * size and some flags.
3367 * Bit fields:
3368 * 31: flag flush RB request
3369 * 30: flag ignore TC (terminal counter) request
3370 * 29: flag fast IRQ request
3371 * 28-14: Reserved
3372 * 13-00: RX frame size
3373 */
3374 __le32 len_n_flags;
3375 struct iwl_cmd_header hdr;
3376 union {
3377 struct iwl3945_rx_frame rx_frame;
3378 struct iwl3945_tx_resp tx_resp;
3379 struct iwl3945_beacon_notif beacon_status;
3380
3381 struct iwl_alive_resp alive_frame;
3382 struct iwl_spectrum_notification spectrum_notif;
3383 struct iwl_csa_notification csa_notif;
3384 struct iwl_error_resp err_resp;
3385 struct iwl_card_state_notif card_state_notif;
3386 struct iwl_add_sta_resp add_sta;
3387 struct iwl_rem_sta_resp rem_sta;
3388 struct iwl_sleep_notification sleep_notif;
3389 struct iwl_spectrum_resp spectrum;
3390 struct iwl_notif_statistics stats;
3391 struct iwl_compressed_ba_resp compressed_ba;
3392 struct iwl_missed_beacon_notif missed_beacon;
3393 __le32 status;
3394 u8 raw[0];
3395 } u;
3396} __packed;
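As a closing illustration (not part of the original header), the RX frame size can be recovered from len_n_flags with a mask built from the bit layout documented above; 0x3fff is simply bits 13-0 written out.

static u32 example_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	/* bits 13-0 carry the RX frame size; the upper bits are flags */
	return le32_to_cpu(pkt->len_n_flags) & 0x3fff;
}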
3397
3398#endif /* __iwl_legacy_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
new file mode 100644
index 00000000000..e5971fe9d16
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -0,0 +1,2659 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-dev.h"
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-power.h"
42#include "iwl-sta.h"
43#include "iwl-helpers.h"
44
45
46MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/*
52 * If bt_coex_active is true, uCode will kill/defer Tx every time the
53 * priority line is asserted (BT is sending signals on the priority line
54 * in the PCIx).
55 * If bt_coex_active is false, uCode will ignore the BT activity and
56 * perform the normal operation.
57 *
58 * Users might experience transmit issues on some platforms due to WiFi/BT
59 * coexistence problems. The typical symptoms are:
60 *  - able to scan and find all the available APs,
61 *  - but not able to associate with any AP.
62 * On those platforms, WiFi communication can be restored by setting the
63 * "bt_coex_active" module parameter to "false".
64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */
67static bool bt_coex_active = true;
68module_param(bt_coex_active, bool, S_IRUGO);
69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
70
71u32 iwlegacy_debug_level;
72EXPORT_SYMBOL(iwlegacy_debug_level);
73
74const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
75EXPORT_SYMBOL(iwlegacy_bcast_addr);
76
77
78/* This function both allocates and initializes hw and priv. */
79struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
80{
81 struct iwl_priv *priv;
82 /* mac80211 allocates memory for this device instance, including
83 * space for this driver's private structure */
84 struct ieee80211_hw *hw;
85
86 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
87 cfg->ops->ieee80211_ops);
88 if (hw == NULL) {
89 pr_err("%s: Can not allocate network device\n",
90 cfg->name);
91 goto out;
92 }
93
94 priv = hw->priv;
95 priv->hw = hw;
96
97out:
98 return hw;
99}
100EXPORT_SYMBOL(iwl_legacy_alloc_all);
101
102#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
103#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
104static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
105 struct ieee80211_sta_ht_cap *ht_info,
106 enum ieee80211_band band)
107{
108 u16 max_bit_rate = 0;
109 u8 rx_chains_num = priv->hw_params.rx_chains_num;
110 u8 tx_chains_num = priv->hw_params.tx_chains_num;
111
112 ht_info->cap = 0;
113 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
114
115 ht_info->ht_supported = true;
116
117 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
118 max_bit_rate = MAX_BIT_RATE_20_MHZ;
119 if (priv->hw_params.ht40_channel & BIT(band)) {
120 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
121 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
122 ht_info->mcs.rx_mask[4] = 0x01;
123 max_bit_rate = MAX_BIT_RATE_40_MHZ;
124 }
125
126 if (priv->cfg->mod_params->amsdu_size_8K)
127 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
128
129 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
130 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
131
132 ht_info->mcs.rx_mask[0] = 0xFF;
133 if (rx_chains_num >= 2)
134 ht_info->mcs.rx_mask[1] = 0xFF;
135 if (rx_chains_num >= 3)
136 ht_info->mcs.rx_mask[2] = 0xFF;
137
138 /* Highest supported Rx data rate */
139 max_bit_rate *= rx_chains_num;
140 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
141 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
142
143 /* Tx MCS capabilities */
144 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
145 if (tx_chains_num != rx_chains_num) {
146 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
147 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
148 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
149 }
150}
151
152/**
153 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on EEPROM
154 */
155int iwl_legacy_init_geos(struct iwl_priv *priv)
156{
157 struct iwl_channel_info *ch;
158 struct ieee80211_supported_band *sband;
159 struct ieee80211_channel *channels;
160 struct ieee80211_channel *geo_ch;
161 struct ieee80211_rate *rates;
162 int i = 0;
163 s8 max_tx_power = 0;
164
165 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
166 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
167 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
168 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
169 return 0;
170 }
171
172 channels = kzalloc(sizeof(struct ieee80211_channel) *
173 priv->channel_count, GFP_KERNEL);
174 if (!channels)
175 return -ENOMEM;
176
177 rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
178 GFP_KERNEL);
179 if (!rates) {
180 kfree(channels);
181 return -ENOMEM;
182 }
183
184 /* 5.2GHz channels start after the 2.4GHz channels */
185 sband = &priv->bands[IEEE80211_BAND_5GHZ];
186 sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
187 /* just OFDM */
188 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
189 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
190
191 if (priv->cfg->sku & IWL_SKU_N)
192 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
193 IEEE80211_BAND_5GHZ);
194
195 sband = &priv->bands[IEEE80211_BAND_2GHZ];
196 sband->channels = channels;
197 /* OFDM & CCK */
198 sband->bitrates = rates;
199 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
200
201 if (priv->cfg->sku & IWL_SKU_N)
202 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
203 IEEE80211_BAND_2GHZ);
204
205 priv->ieee_channels = channels;
206 priv->ieee_rates = rates;
207
208 for (i = 0; i < priv->channel_count; i++) {
209 ch = &priv->channel_info[i];
210
211 if (!iwl_legacy_is_channel_valid(ch))
212 continue;
213
214 sband = &priv->bands[ch->band];
215
216 geo_ch = &sband->channels[sband->n_channels++];
217
218 geo_ch->center_freq =
219 ieee80211_channel_to_frequency(ch->channel, ch->band);
220 geo_ch->max_power = ch->max_power_avg;
221 geo_ch->max_antenna_gain = 0xff;
222 geo_ch->hw_value = ch->channel;
223
224 if (iwl_legacy_is_channel_valid(ch)) {
225 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
226 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
227
228 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
229 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
230
231 if (ch->flags & EEPROM_CHANNEL_RADAR)
232 geo_ch->flags |= IEEE80211_CHAN_RADAR;
233
234 geo_ch->flags |= ch->ht40_extension_channel;
235
236 if (ch->max_power_avg > max_tx_power)
237 max_tx_power = ch->max_power_avg;
238 } else {
239 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
240 }
241
242 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
243 ch->channel, geo_ch->center_freq,
244 iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
245 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
246 "restricted" : "valid",
247 geo_ch->flags);
248 }
249
250 priv->tx_power_device_lmt = max_tx_power;
251 priv->tx_power_user_lmt = max_tx_power;
252 priv->tx_power_next = max_tx_power;
253
254 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
255 priv->cfg->sku & IWL_SKU_A) {
256 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
257 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
258 priv->pci_dev->device,
259 priv->pci_dev->subsystem_device);
260 priv->cfg->sku &= ~IWL_SKU_A;
261 }
262
263 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
264 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
265 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
266
267 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
268
269 return 0;
270}
271EXPORT_SYMBOL(iwl_legacy_init_geos);
272
273/*
274 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
275 */
276void iwl_legacy_free_geos(struct iwl_priv *priv)
277{
278 kfree(priv->ieee_channels);
279 kfree(priv->ieee_rates);
280 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
281}
282EXPORT_SYMBOL(iwl_legacy_free_geos);
283
284static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
285 enum ieee80211_band band,
286 u16 channel, u8 extension_chan_offset)
287{
288 const struct iwl_channel_info *ch_info;
289
290 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
291 if (!iwl_legacy_is_channel_valid(ch_info))
292 return false;
293
294 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
295 return !(ch_info->ht40_extension_channel &
296 IEEE80211_CHAN_NO_HT40PLUS);
297 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
298 return !(ch_info->ht40_extension_channel &
299 IEEE80211_CHAN_NO_HT40MINUS);
300
301 return false;
302}
303
304bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
305 struct iwl_rxon_context *ctx,
306 struct ieee80211_sta_ht_cap *ht_cap)
307{
308 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
309 return false;
310
311 /*
312 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40;
313 * the bit will not be set in the pure 40MHz case
314 */
315 if (ht_cap && !ht_cap->ht_supported)
316 return false;
317
318#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
319 if (priv->disable_ht40)
320 return false;
321#endif
322
323 return iwl_legacy_is_channel_extension(priv, priv->band,
324 le16_to_cpu(ctx->staging.channel),
325 ctx->ht.extension_chan_offset);
326}
327EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
328
329static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
330{
331 u16 new_val;
332 u16 beacon_factor;
333
334 /*
335 * If mac80211 hasn't given us a beacon interval, program
336 * the default into the device.
337 */
338 if (!beacon_val)
339 return DEFAULT_BEACON_INTERVAL;
340
341 /*
342 * If the beacon interval we obtained from the peer
343 * is too large, we'll have to wake up more often
344 * (and in IBSS case, we'll beacon too much)
345 *
346 * For example, if max_beacon_val is 4096, and the
347 * requested beacon interval is 7000, we'll have to
348 * use 3500 to be able to wake up on the beacons.
349 *
350 * This could badly influence beacon detection stats.
351 */
352
353 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
354 new_val = beacon_val / beacon_factor;
355
356 if (!new_val)
357 new_val = max_beacon_val;
358
359 return new_val;
360}
361
362int
363iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
364{
365 u64 tsf;
366 s32 interval_tm, rem;
367 struct ieee80211_conf *conf = NULL;
368 u16 beacon_int;
369 struct ieee80211_vif *vif = ctx->vif;
370
371 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
372
373 lockdep_assert_held(&priv->mutex);
374
375 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
376
377 ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
378 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
379
380 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
381
382 /*
383 * TODO: For IBSS we need to get atim_window from mac80211,
384 * for now just always use 0
385 */
386 ctx->timing.atim_window = 0;
387
388 beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
389 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
390 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
391
392 tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
393 interval_tm = beacon_int * TIME_UNIT;
394 rem = do_div(tsf, interval_tm);
395 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
396
397 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
398
399 IWL_DEBUG_ASSOC(priv,
400 "beacon interval %d beacon timer %d beacon tim %d\n",
401 le16_to_cpu(ctx->timing.beacon_interval),
402 le32_to_cpu(ctx->timing.beacon_init_val),
403 le16_to_cpu(ctx->timing.atim_window));
404
405 return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
406 sizeof(ctx->timing), &ctx->timing);
407}
408EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
409
410void
411iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
412 struct iwl_rxon_context *ctx,
413 int hw_decrypt)
414{
415 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
416
417 if (hw_decrypt)
418 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
419 else
420 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
421
422}
423EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
424
425/* validate RXON structure is valid */
426int
427iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
428{
429 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
430 bool error = false;
431
432 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
433 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
434 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
435 error = true;
436 }
437 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
438 IWL_WARN(priv, "check 2.4G: wrong radar\n");
439 error = true;
440 }
441 } else {
442 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
443 IWL_WARN(priv, "check 5.2G: not short slot!\n");
444 error = true;
445 }
446 if (rxon->flags & RXON_FLG_CCK_MSK) {
447 IWL_WARN(priv, "check 5.2G: CCK!\n");
448 error = true;
449 }
450 }
451 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
452 IWL_WARN(priv, "mac/bssid mcast!\n");
453 error = true;
454 }
455
456 /* make sure basic rates 6Mbps and 1Mbps are supported */
457 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
458 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
459 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
460 error = true;
461 }
462
463 if (le16_to_cpu(rxon->assoc_id) > 2007) {
464 IWL_WARN(priv, "aid > 2007\n");
465 error = true;
466 }
467
468 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
469 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
470 IWL_WARN(priv, "CCK and short slot\n");
471 error = true;
472 }
473
474 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
475 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
476 IWL_WARN(priv, "CCK and auto detect\n");
477 error = true;
478 }
479
480 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
481 RXON_FLG_TGG_PROTECT_MSK)) ==
482 RXON_FLG_TGG_PROTECT_MSK) {
483 IWL_WARN(priv, "TGg but no auto-detect\n");
484 error = true;
485 }
486
487 if (error)
488 IWL_WARN(priv, "Tuning to channel %d\n",
489 le16_to_cpu(rxon->channel));
490
491 if (error) {
492 IWL_ERR(priv, "Invalid RXON\n");
493 return -EINVAL;
494 }
495 return 0;
496}
497EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
498
499/**
500 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
501 * @priv: staging_rxon is compared to active_rxon
502 *
503 * If the RXON structure is changing enough to require a new tune,
504 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
505 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
506 */
507int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
508 struct iwl_rxon_context *ctx)
509{
510 const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
511 const struct iwl_legacy_rxon_cmd *active = &ctx->active;
512
513#define CHK(cond) \
514 if ((cond)) { \
515 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
516 return 1; \
517 }
518
519#define CHK_NEQ(c1, c2) \
520 if ((c1) != (c2)) { \
521 IWL_DEBUG_INFO(priv, "need full RXON - " \
522 #c1 " != " #c2 " - %d != %d\n", \
523 (c1), (c2)); \
524 return 1; \
525 }
526
527 /* These items are only settable from the full RXON command */
528 CHK(!iwl_legacy_is_associated_ctx(ctx));
529 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
530 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
531 CHK(compare_ether_addr(staging->wlap_bssid_addr,
532 active->wlap_bssid_addr));
533 CHK_NEQ(staging->dev_type, active->dev_type);
534 CHK_NEQ(staging->channel, active->channel);
535 CHK_NEQ(staging->air_propagation, active->air_propagation);
536 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
537 active->ofdm_ht_single_stream_basic_rates);
538 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
539 active->ofdm_ht_dual_stream_basic_rates);
540 CHK_NEQ(staging->assoc_id, active->assoc_id);
541
542 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
543 * be updated with the RXON_ASSOC command -- however only some
544 * flag transitions are allowed using RXON_ASSOC */
545
546 /* Check if we are not switching bands */
547 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
548 active->flags & RXON_FLG_BAND_24G_MSK);
549
550 /* Check if we are switching association toggle */
551 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
552 active->filter_flags & RXON_FILTER_ASSOC_MSK);
553
554#undef CHK
555#undef CHK_NEQ
556
557 return 0;
558}
559EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
560
561u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
562 struct iwl_rxon_context *ctx)
563{
564 /*
565 * Assign the lowest rate -- should really get this from
566 * the beacon skb from mac80211.
567 */
568 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
569 return IWL_RATE_1M_PLCP;
570 else
571 return IWL_RATE_6M_PLCP;
572}
573EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
574
575static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
576 struct iwl_ht_config *ht_conf,
577 struct iwl_rxon_context *ctx)
578{
579 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
580
581 if (!ctx->ht.enabled) {
582 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
583 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
584 RXON_FLG_HT40_PROT_MSK |
585 RXON_FLG_HT_PROT_MSK);
586 return;
587 }
588
589 rxon->flags |= cpu_to_le32(ctx->ht.protection <<
590 RXON_FLG_HT_OPERATING_MODE_POS);
591
592 /* Set up channel bandwidth:
593 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
594 /* clear the HT channel mode before setting the mode */
595 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
596 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
597 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
598 /* pure ht40 */
599 if (ctx->ht.protection ==
600 IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
601 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
602 /* Note: control channel is opposite of extension channel */
603 switch (ctx->ht.extension_chan_offset) {
604 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
605 rxon->flags &=
606 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
607 break;
608 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
609 rxon->flags |=
610 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
611 break;
612 }
613 } else {
614 /* Note: control channel is opposite of extension channel */
615 switch (ctx->ht.extension_chan_offset) {
616 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
617 rxon->flags &=
618 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
619 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
620 break;
621 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
622 rxon->flags |=
623 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
624 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
625 break;
626 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
627 default:
628 /* channel location only valid if in Mixed mode */
629 IWL_ERR(priv,
630 "invalid extension channel offset\n");
631 break;
632 }
633 }
634 } else {
635 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
636 }
637
638 if (priv->cfg->ops->hcmd->set_rxon_chain)
639 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
640
641 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
642 "extension channel offset 0x%x\n",
643 le32_to_cpu(rxon->flags), ctx->ht.protection,
644 ctx->ht.extension_chan_offset);
645}
646
647void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
648{
649 struct iwl_rxon_context *ctx;
650
651 for_each_context(priv, ctx)
652 _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
653}
654EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
655
656/* Return a valid, unused channel for a passive scan to reset the RF */
657u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
658 enum ieee80211_band band)
659{
660 const struct iwl_channel_info *ch_info;
661 int i;
662 u8 channel = 0;
663 u8 min, max;
664 struct iwl_rxon_context *ctx;
665
666 if (band == IEEE80211_BAND_5GHZ) {
667 min = 14;
668 max = priv->channel_count;
669 } else {
670 min = 0;
671 max = 14;
672 }
673
674 for (i = min; i < max; i++) {
675 bool busy = false;
676
677 for_each_context(priv, ctx) {
678 busy = priv->channel_info[i].channel ==
679 le16_to_cpu(ctx->staging.channel);
680 if (busy)
681 break;
682 }
683
684 if (busy)
685 continue;
686
687 channel = priv->channel_info[i].channel;
688 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
689 if (iwl_legacy_is_channel_valid(ch_info))
690 break;
691 }
692
693 return channel;
694}
695EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
696
697/**
698 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
699 * @ch: requested channel as a pointer to struct ieee80211_channel
700 *
701 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
702 * in the staging RXON flag structure based on the ch->band
703 */
704int
705iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
706 struct iwl_rxon_context *ctx)
707{
708 enum ieee80211_band band = ch->band;
709 u16 channel = ch->hw_value;
710
711 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
712 (priv->band == band))
713 return 0;
714
715 ctx->staging.channel = cpu_to_le16(channel);
716 if (band == IEEE80211_BAND_5GHZ)
717 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
718 else
719 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
720
721 priv->band = band;
722
723 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
724
725 return 0;
726}
727EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
728
729void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
730 struct iwl_rxon_context *ctx,
731 enum ieee80211_band band,
732 struct ieee80211_vif *vif)
733{
734 if (band == IEEE80211_BAND_5GHZ) {
735 ctx->staging.flags &=
736 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
737 | RXON_FLG_CCK_MSK);
738 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
739 } else {
740 /* Copied from iwl_post_associate() */
741 if (vif && vif->bss_conf.use_short_slot)
742 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
743 else
744 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
745
746 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
747 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
748 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
749 }
750}
751EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
752
753/*
754 * initialize rxon structure with default values from eeprom
755 */
756void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
757 struct iwl_rxon_context *ctx)
758{
759 const struct iwl_channel_info *ch_info;
760
761 memset(&ctx->staging, 0, sizeof(ctx->staging));
762
763 if (!ctx->vif) {
764 ctx->staging.dev_type = ctx->unused_devtype;
765 } else
766 switch (ctx->vif->type) {
767
768 case NL80211_IFTYPE_STATION:
769 ctx->staging.dev_type = ctx->station_devtype;
770 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
771 break;
772
773 case NL80211_IFTYPE_ADHOC:
774 ctx->staging.dev_type = ctx->ibss_devtype;
775 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
776 ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
777 RXON_FILTER_ACCEPT_GRP_MSK;
778 break;
779
780 default:
781 IWL_ERR(priv, "Unsupported interface type %d\n",
782 ctx->vif->type);
783 break;
784 }
785
786#if 0
787 /* TODO: Figure out when short_preamble would be set and cache from
788 * that */
789 if (!hw_to_local(priv->hw)->short_preamble)
790 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
791 else
792 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
793#endif
794
795 ch_info = iwl_legacy_get_channel_info(priv, priv->band,
796 le16_to_cpu(ctx->active.channel));
797
798 if (!ch_info)
799 ch_info = &priv->channel_info[0];
800
801 ctx->staging.channel = cpu_to_le16(ch_info->channel);
802 priv->band = ch_info->band;
803
804 iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
805
806 ctx->staging.ofdm_basic_rates =
807 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
808 ctx->staging.cck_basic_rates =
809 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
810
811 /* clear both MIX and PURE40 mode flag */
812 ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
813 RXON_FLG_CHANNEL_MODE_PURE_40);
814 if (ctx->vif)
815 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
816
817 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
818 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
819}
820EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
821
822void iwl_legacy_set_rate(struct iwl_priv *priv)
823{
824 const struct ieee80211_supported_band *hw = NULL;
825 struct ieee80211_rate *rate;
826 struct iwl_rxon_context *ctx;
827 int i;
828
829 hw = iwl_get_hw_mode(priv, priv->band);
830 if (!hw) {
831 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
832 return;
833 }
834
835 priv->active_rate = 0;
836
837 for (i = 0; i < hw->n_bitrates; i++) {
838 rate = &(hw->bitrates[i]);
839 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
840 priv->active_rate |= (1 << rate->hw_value);
841 }
842
843 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
844
845 for_each_context(priv, ctx) {
846 ctx->staging.cck_basic_rates =
847 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
848
849 ctx->staging.ofdm_basic_rates =
850 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
851 }
852}
853EXPORT_SYMBOL(iwl_legacy_set_rate);
854
855void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
856{
857 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
858
859 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
860 return;
861
862 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
863 ieee80211_chswitch_done(ctx->vif, is_success);
864}
865EXPORT_SYMBOL(iwl_legacy_chswitch_done);
866
867void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
868{
869 struct iwl_rx_packet *pkt = rxb_addr(rxb);
870 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
871
872 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
873 struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
874
875 if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
876 return;
877
878 if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
879 rxon->channel = csa->channel;
880 ctx->staging.channel = csa->channel;
881 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
882 le16_to_cpu(csa->channel));
883 iwl_legacy_chswitch_done(priv, true);
884 } else {
885 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
886 le16_to_cpu(csa->channel));
887 iwl_legacy_chswitch_done(priv, false);
888 }
889}
890EXPORT_SYMBOL(iwl_legacy_rx_csa);
891
892#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
893void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
894 struct iwl_rxon_context *ctx)
895{
896 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
897
898 IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
899 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
900 IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
901 le16_to_cpu(rxon->channel));
902 IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
903 IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
904 le32_to_cpu(rxon->filter_flags));
905 IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
906 IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
907 rxon->ofdm_basic_rates);
908 IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
909 rxon->cck_basic_rates);
910 IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
911 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
912 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
913 le16_to_cpu(rxon->assoc_id));
914}
915EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
916#endif
917/**
918 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
919 */
920void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
921{
922 /* Set the FW error flag -- cleared on iwl_down */
923 set_bit(STATUS_FW_ERROR, &priv->status);
924
925 /* Cancel currently queued command. */
926 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
927
928 IWL_ERR(priv, "Loaded firmware version: %s\n",
929 priv->hw->wiphy->fw_version);
930
931 priv->cfg->ops->lib->dump_nic_error_log(priv);
932 if (priv->cfg->ops->lib->dump_fh)
933 priv->cfg->ops->lib->dump_fh(priv, NULL, false);
934#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
935 if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
936 iwl_legacy_print_rx_config_cmd(priv,
937 &priv->contexts[IWL_RXON_CTX_BSS]);
938#endif
939
940 wake_up(&priv->wait_command_queue);
941
942 /* Keep the restart process from trying to send host
943 * commands by clearing the INIT status bit */
944 clear_bit(STATUS_READY, &priv->status);
945
946 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
947 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
948 "Restarting adapter due to uCode error.\n");
949
950 if (priv->cfg->mod_params->restart_fw)
951 queue_work(priv->workqueue, &priv->restart);
952 }
953}
954EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
955
956static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
957{
958 int ret = 0;
959
960 /* stop device's busmaster DMA activity */
961 iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
962
963 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
964 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
965 if (ret)
966 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
967
968 IWL_DEBUG_INFO(priv, "stop master\n");
969
970 return ret;
971}
972
973void iwl_legacy_apm_stop(struct iwl_priv *priv)
974{
975 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
976
977 /* Stop device's DMA activity */
978 iwl_legacy_apm_stop_master(priv);
979
980 /* Reset the entire device */
981 iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
982
983 udelay(10);
984
985 /*
986 * Clear "initialization complete" bit to move adapter from
987 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
988 */
989 iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
990 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
991}
992EXPORT_SYMBOL(iwl_legacy_apm_stop);
993
994
995/*
996 * Start up NIC's basic functionality after it has been reset
997 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
998 * NOTE: This does not load uCode nor start the embedded processor
999 */
1000int iwl_legacy_apm_init(struct iwl_priv *priv)
1001{
1002 int ret = 0;
1003 u16 lctl;
1004
1005 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1006
1007 /*
1008 * Use "set_bit" below rather than "write", to preserve any hardware
1009 * bits already set by default after reset.
1010 */
1011
1012 /* Disable L0S exit timer (platform NMI Work/Around) */
1013 iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1014 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1015
1016 /*
1017 * Disable L0s without affecting L1;
1018 * don't wait for ICH L0s (ICH bug W/A)
1019 */
1020 iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1021 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1022
1023 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1024 iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
1025 CSR_DBG_HPET_MEM_REG_VAL);
1026
1027 /*
1028 * Enable HAP INTA (interrupt from management bus) to
1029 * wake device's PCI Express link L1a -> L0s
1030 * NOTE: This is a no-op for 3945 (non-existent bit)
1031 */
1032 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1033 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1034
1035 /*
1036 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1037 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1038 * If so (likely), disable L0S, so device moves directly L0->L1;
1039 * costs negligible amount of power savings.
1040 * If not (unlikely), enable L0S, so there is at least some
1041 * power savings, even without L1.
1042 */
1043 if (priv->cfg->base_params->set_l0s) {
1044 lctl = iwl_legacy_pcie_link_ctl(priv);
1045 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1046 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1047 /* L1-ASPM enabled; disable(!) L0S */
1048 iwl_legacy_set_bit(priv, CSR_GIO_REG,
1049 CSR_GIO_REG_VAL_L0S_ENABLED);
1050 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1051 } else {
1052 /* L1-ASPM disabled; enable(!) L0S */
1053 iwl_legacy_clear_bit(priv, CSR_GIO_REG,
1054 CSR_GIO_REG_VAL_L0S_ENABLED);
1055 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1056 }
1057 }
1058
1059 /* Configure analog phase-lock-loop before activating to D0A */
1060 if (priv->cfg->base_params->pll_cfg_val)
1061 iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
1062 priv->cfg->base_params->pll_cfg_val);
1063
1064 /*
1065 * Set "initialization complete" bit to move adapter from
1066 * D0U* --> D0A* (powered-up active) state.
1067 */
1068 iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1069
1070 /*
1071 * Wait for clock stabilization; once stabilized, access to
1072 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
1073 * and accesses to uCode SRAM.
1074 */
1075 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1076 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1077 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1078 if (ret < 0) {
1079 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1080 goto out;
1081 }
1082
1083 /*
1084 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1085	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
1086 *
1087 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1088 * do not disable clocks. This preserves any hardware bits already
1089 * set by default in "CLK_CTRL_REG" after reset.
1090 */
1091 if (priv->cfg->base_params->use_bsm)
1092 iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
1093 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1094 else
1095 iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
1096 APMG_CLK_VAL_DMA_CLK_RQT);
1097 udelay(20);
1098
1099 /* Disable L1-Active */
1100 iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1101 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1102
1103out:
1104 return ret;
1105}
1106EXPORT_SYMBOL(iwl_legacy_apm_init);
1107
1108
1109int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1110{
1111 int ret;
1112 s8 prev_tx_power;
1113 bool defer;
1114 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1115
1116 lockdep_assert_held(&priv->mutex);
1117
1118 if (priv->tx_power_user_lmt == tx_power && !force)
1119 return 0;
1120
1121 if (!priv->cfg->ops->lib->send_tx_power)
1122 return -EOPNOTSUPP;
1123
1124	/* 0 dBm means 1 milliwatt */
1125 if (tx_power < 0) {
1126 IWL_WARN(priv,
1127 "Requested user TXPOWER %d below 1 mW.\n",
1128 tx_power);
1129 return -EINVAL;
1130 }
1131
1132 if (tx_power > priv->tx_power_device_lmt) {
1133 IWL_WARN(priv,
1134 "Requested user TXPOWER %d above upper limit %d.\n",
1135 tx_power, priv->tx_power_device_lmt);
1136 return -EINVAL;
1137 }
1138
1139 if (!iwl_legacy_is_ready_rf(priv))
1140 return -EIO;
1141
1142	/* scan complete and commit_rxon use the tx_power_next value,
1143	 * so it always needs to reflect the newest request */
1144 priv->tx_power_next = tx_power;
1145
1146 /* do not set tx power when scanning or channel changing */
1147 defer = test_bit(STATUS_SCANNING, &priv->status) ||
1148 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
1149 if (defer && !force) {
1150 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
1151 return 0;
1152 }
1153
1154 prev_tx_power = priv->tx_power_user_lmt;
1155 priv->tx_power_user_lmt = tx_power;
1156
1157 ret = priv->cfg->ops->lib->send_tx_power(priv);
1158
1159	/* if setting tx_power failed, restore the original tx power */
1160 if (ret) {
1161 priv->tx_power_user_lmt = prev_tx_power;
1162 priv->tx_power_next = prev_tx_power;
1163 }
1164 return ret;
1165}
1166EXPORT_SYMBOL(iwl_legacy_set_tx_power);
1167
1168void iwl_legacy_send_bt_config(struct iwl_priv *priv)
1169{
1170 struct iwl_bt_cmd bt_cmd = {
1171 .lead_time = BT_LEAD_TIME_DEF,
1172 .max_kill = BT_MAX_KILL_DEF,
1173 .kill_ack_mask = 0,
1174 .kill_cts_mask = 0,
1175 };
1176
1177 if (!bt_coex_active)
1178 bt_cmd.flags = BT_COEX_DISABLE;
1179 else
1180 bt_cmd.flags = BT_COEX_ENABLE;
1181
1182 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1183 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1184
1185 if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1186 sizeof(struct iwl_bt_cmd), &bt_cmd))
1187 IWL_ERR(priv, "failed to send BT Coex Config\n");
1188}
1189EXPORT_SYMBOL(iwl_legacy_send_bt_config);
1190
1191int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1192{
1193 struct iwl_statistics_cmd statistics_cmd = {
1194 .configuration_flags =
1195 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1196 };
1197
1198 if (flags & CMD_ASYNC)
1199 return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1200 sizeof(struct iwl_statistics_cmd),
1201 &statistics_cmd, NULL);
1202 else
1203 return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1204 sizeof(struct iwl_statistics_cmd),
1205 &statistics_cmd);
1206}
1207EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
1208
1209void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
1210 struct iwl_rx_mem_buffer *rxb)
1211{
1212#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1213 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1214 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
1215 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
1216 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1217#endif
1218}
1219EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
1220
1221void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1222 struct iwl_rx_mem_buffer *rxb)
1223{
1224 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1225 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1226 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1227 "notification for %s:\n", len,
1228 iwl_legacy_get_cmd_string(pkt->hdr.cmd));
1229 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1230}
1231EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
1232
1233void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
1234 struct iwl_rx_mem_buffer *rxb)
1235{
1236 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1237
1238 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
1239 "seq 0x%04X ser 0x%08X\n",
1240 le32_to_cpu(pkt->u.err_resp.error_type),
1241 iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
1242 pkt->u.err_resp.cmd_id,
1243 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
1244 le32_to_cpu(pkt->u.err_resp.error_info));
1245}
1246EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
1247
1248void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
1249{
1250 memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
1251}
1252
1253int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1254 const struct ieee80211_tx_queue_params *params)
1255{
1256 struct iwl_priv *priv = hw->priv;
1257 struct iwl_rxon_context *ctx;
1258 unsigned long flags;
1259 int q;
1260
1261 IWL_DEBUG_MAC80211(priv, "enter\n");
1262
1263 if (!iwl_legacy_is_ready_rf(priv)) {
1264 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1265 return -EIO;
1266 }
1267
1268 if (queue >= AC_NUM) {
1269 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1270 return 0;
1271 }
1272
1273 q = AC_NUM - 1 - queue;
1274
1275 spin_lock_irqsave(&priv->lock, flags);
1276
1277 for_each_context(priv, ctx) {
1278 ctx->qos_data.def_qos_parm.ac[q].cw_min =
1279 cpu_to_le16(params->cw_min);
1280 ctx->qos_data.def_qos_parm.ac[q].cw_max =
1281 cpu_to_le16(params->cw_max);
1282 ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
1283 ctx->qos_data.def_qos_parm.ac[q].edca_txop =
1284 cpu_to_le16((params->txop * 32));
1285
1286 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1287 }
1288
1289 spin_unlock_irqrestore(&priv->lock, flags);
1290
1291 IWL_DEBUG_MAC80211(priv, "leave\n");
1292 return 0;
1293}
1294EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
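/*
 * Minimal standalone sketch (illustration only, not the driver code): two
 * details of iwl_legacy_mac_conf_tx() above.  The mac80211 queue index
 * (queue 0 is the highest-priority VO queue) is reversed into the uCode's
 * EDCA table order via q = AC_NUM - 1 - queue, and TXOP arrives in 802.11
 * units of 32 usec and is stored as microseconds.  AC_NUM is assumed to be 4
 * here and the names/values are illustrative only.
 */
#include <stdio.h>

#define AC_NUM_SKETCH 4

int main(void)
{
	unsigned int queue, txop = 94;	/* example: 94 * 32 usec = 3008 usec */

	for (queue = 0; queue < AC_NUM_SKETCH; queue++)
		printf("mac80211 queue %u -> EDCA table index %u\n",
		       queue, AC_NUM_SKETCH - 1 - queue);

	printf("txop %u (32-usec units) -> %u usec\n", txop, txop * 32);
	return 0;
}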
1295
1296int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
1297{
1298 struct iwl_priv *priv = hw->priv;
1299
1300 return priv->ibss_manager == IWL_IBSS_MANAGER;
1301}
1302EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
1303
1304static int
1305iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1306{
1307 iwl_legacy_connection_init_rx_config(priv, ctx);
1308
1309 if (priv->cfg->ops->hcmd->set_rxon_chain)
1310 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1311
1312 return iwl_legacy_commit_rxon(priv, ctx);
1313}
1314
1315static int iwl_legacy_setup_interface(struct iwl_priv *priv,
1316 struct iwl_rxon_context *ctx)
1317{
1318 struct ieee80211_vif *vif = ctx->vif;
1319 int err;
1320
1321 lockdep_assert_held(&priv->mutex);
1322
1323 /*
1324 * This variable will be correct only when there's just
1325 * a single context, but all code using it is for hardware
1326 * that supports only one context.
1327 */
1328 priv->iw_mode = vif->type;
1329
1330 ctx->is_active = true;
1331
1332 err = iwl_legacy_set_mode(priv, ctx);
1333 if (err) {
1334 if (!ctx->always_active)
1335 ctx->is_active = false;
1336 return err;
1337 }
1338
1339 return 0;
1340}
1341
1342int
1343iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1344{
1345 struct iwl_priv *priv = hw->priv;
1346 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1347 struct iwl_rxon_context *tmp, *ctx = NULL;
1348 int err;
1349
1350 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1351 vif->type, vif->addr);
1352
1353 mutex_lock(&priv->mutex);
1354
1355 if (!iwl_legacy_is_ready_rf(priv)) {
1356 IWL_WARN(priv, "Try to add interface when device not ready\n");
1357 err = -EINVAL;
1358 goto out;
1359 }
1360
1361 for_each_context(priv, tmp) {
1362 u32 possible_modes =
1363 tmp->interface_modes | tmp->exclusive_interface_modes;
1364
1365 if (tmp->vif) {
1366 /* check if this busy context is exclusive */
1367 if (tmp->exclusive_interface_modes &
1368 BIT(tmp->vif->type)) {
1369 err = -EINVAL;
1370 goto out;
1371 }
1372 continue;
1373 }
1374
1375 if (!(possible_modes & BIT(vif->type)))
1376 continue;
1377
1378 /* have maybe usable context w/o interface */
1379 ctx = tmp;
1380 break;
1381 }
1382
1383 if (!ctx) {
1384 err = -EOPNOTSUPP;
1385 goto out;
1386 }
1387
1388 vif_priv->ctx = ctx;
1389 ctx->vif = vif;
1390
1391 err = iwl_legacy_setup_interface(priv, ctx);
1392 if (!err)
1393 goto out;
1394
1395 ctx->vif = NULL;
1396 priv->iw_mode = NL80211_IFTYPE_STATION;
1397 out:
1398 mutex_unlock(&priv->mutex);
1399
1400 IWL_DEBUG_MAC80211(priv, "leave\n");
1401 return err;
1402}
1403EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
1404
1405static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
1406 struct ieee80211_vif *vif,
1407 bool mode_change)
1408{
1409 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1410
1411 lockdep_assert_held(&priv->mutex);
1412
1413 if (priv->scan_vif == vif) {
1414 iwl_legacy_scan_cancel_timeout(priv, 200);
1415 iwl_legacy_force_scan_end(priv);
1416 }
1417
1418 if (!mode_change) {
1419 iwl_legacy_set_mode(priv, ctx);
1420 if (!ctx->always_active)
1421 ctx->is_active = false;
1422 }
1423}
1424
1425void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
1426 struct ieee80211_vif *vif)
1427{
1428 struct iwl_priv *priv = hw->priv;
1429 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1430
1431 IWL_DEBUG_MAC80211(priv, "enter\n");
1432
1433 mutex_lock(&priv->mutex);
1434
1435 WARN_ON(ctx->vif != vif);
1436 ctx->vif = NULL;
1437
1438 iwl_legacy_teardown_interface(priv, vif, false);
1439
1440 memset(priv->bssid, 0, ETH_ALEN);
1441 mutex_unlock(&priv->mutex);
1442
1443 IWL_DEBUG_MAC80211(priv, "leave\n");
1444
1445}
1446EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
1447
1448int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
1449{
1450 if (!priv->txq)
1451 priv->txq = kzalloc(
1452 sizeof(struct iwl_tx_queue) *
1453 priv->cfg->base_params->num_of_queues,
1454 GFP_KERNEL);
1455 if (!priv->txq) {
1456 IWL_ERR(priv, "Not enough memory for txq\n");
1457 return -ENOMEM;
1458 }
1459 return 0;
1460}
1461EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
1462
1463void iwl_legacy_txq_mem(struct iwl_priv *priv)
1464{
1465 kfree(priv->txq);
1466 priv->txq = NULL;
1467}
1468EXPORT_SYMBOL(iwl_legacy_txq_mem);
1469
1470#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1471
1472#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1473
1474void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
1475{
1476 priv->tx_traffic_idx = 0;
1477 priv->rx_traffic_idx = 0;
1478 if (priv->tx_traffic)
1479 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1480 if (priv->rx_traffic)
1481 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1482}
1483
1484int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
1485{
1486 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1487
1488 if (iwlegacy_debug_level & IWL_DL_TX) {
1489 if (!priv->tx_traffic) {
1490 priv->tx_traffic =
1491 kzalloc(traffic_size, GFP_KERNEL);
1492 if (!priv->tx_traffic)
1493 return -ENOMEM;
1494 }
1495 }
1496 if (iwlegacy_debug_level & IWL_DL_RX) {
1497 if (!priv->rx_traffic) {
1498 priv->rx_traffic =
1499 kzalloc(traffic_size, GFP_KERNEL);
1500 if (!priv->rx_traffic)
1501 return -ENOMEM;
1502 }
1503 }
1504 iwl_legacy_reset_traffic_log(priv);
1505 return 0;
1506}
1507EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
1508
1509void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
1510{
1511 kfree(priv->tx_traffic);
1512 priv->tx_traffic = NULL;
1513
1514 kfree(priv->rx_traffic);
1515 priv->rx_traffic = NULL;
1516}
1517EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
1518
1519void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
1520 u16 length, struct ieee80211_hdr *header)
1521{
1522 __le16 fc;
1523 u16 len;
1524
1525 if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
1526 return;
1527
1528 if (!priv->tx_traffic)
1529 return;
1530
1531 fc = header->frame_control;
1532 if (ieee80211_is_data(fc)) {
1533 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1534 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1535 memcpy((priv->tx_traffic +
1536 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1537 header, len);
1538 priv->tx_traffic_idx =
1539 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1540 }
1541}
1542EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
1543
1544void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
1545 u16 length, struct ieee80211_hdr *header)
1546{
1547 __le16 fc;
1548 u16 len;
1549
1550 if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
1551 return;
1552
1553 if (!priv->rx_traffic)
1554 return;
1555
1556 fc = header->frame_control;
1557 if (ieee80211_is_data(fc)) {
1558 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1559 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1560 memcpy((priv->rx_traffic +
1561 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1562 header, len);
1563 priv->rx_traffic_idx =
1564 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1565 }
1566}
1567EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
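/*
 * Minimal standalone sketch (illustration only, not the driver code): both
 * traffic-log helpers above implement the same fixed-slot ring buffer, i.e.
 * copy at most one entry's worth of header bytes into slot idx, then advance
 * idx modulo the number of entries.  The sizes below are made up; the real
 * ones come from IWL_TRAFFIC_ENTRY_SIZE and IWL_TRAFFIC_ENTRIES.
 */
#include <stdint.h>
#include <string.h>

#define ENTRY_SIZE_SKETCH 64
#define ENTRIES_SKETCH    256

struct traffic_log_sketch {
	uint8_t buf[ENTRIES_SKETCH * ENTRY_SIZE_SKETCH];
	unsigned int idx;
};

static void log_frame_sketch(struct traffic_log_sketch *log,
			     const void *hdr, size_t len)
{
	if (len > ENTRY_SIZE_SKETCH)
		len = ENTRY_SIZE_SKETCH;	/* truncate long headers, as above */
	memcpy(log->buf + log->idx * ENTRY_SIZE_SKETCH, hdr, len);
	log->idx = (log->idx + 1) % ENTRIES_SKETCH;	/* wrap to the oldest slot */
}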
1568
1569const char *iwl_legacy_get_mgmt_string(int cmd)
1570{
1571 switch (cmd) {
1572 IWL_CMD(MANAGEMENT_ASSOC_REQ);
1573 IWL_CMD(MANAGEMENT_ASSOC_RESP);
1574 IWL_CMD(MANAGEMENT_REASSOC_REQ);
1575 IWL_CMD(MANAGEMENT_REASSOC_RESP);
1576 IWL_CMD(MANAGEMENT_PROBE_REQ);
1577 IWL_CMD(MANAGEMENT_PROBE_RESP);
1578 IWL_CMD(MANAGEMENT_BEACON);
1579 IWL_CMD(MANAGEMENT_ATIM);
1580 IWL_CMD(MANAGEMENT_DISASSOC);
1581 IWL_CMD(MANAGEMENT_AUTH);
1582 IWL_CMD(MANAGEMENT_DEAUTH);
1583 IWL_CMD(MANAGEMENT_ACTION);
1584 default:
1585 return "UNKNOWN";
1586
1587 }
1588}
1589
1590const char *iwl_legacy_get_ctrl_string(int cmd)
1591{
1592 switch (cmd) {
1593 IWL_CMD(CONTROL_BACK_REQ);
1594 IWL_CMD(CONTROL_BACK);
1595 IWL_CMD(CONTROL_PSPOLL);
1596 IWL_CMD(CONTROL_RTS);
1597 IWL_CMD(CONTROL_CTS);
1598 IWL_CMD(CONTROL_ACK);
1599 IWL_CMD(CONTROL_CFEND);
1600 IWL_CMD(CONTROL_CFENDACK);
1601 default:
1602 return "UNKNOWN";
1603
1604 }
1605}
1606
1607void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
1608{
1609 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1610 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1611}
1612
1613/*
1614 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined,
1615 * the iwl_legacy_update_stats() function
1616 * records all MGMT, CTRL and DATA packets for both the TX and RX paths;
1617 * use debugfs to display the rx/tx statistics.
1618 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT or CTRL
1619 * information is recorded, but DATA packets are still counted, because
1620 * iwl_led.c controls the LED blinking based on the
1621 * number of tx and rx data frames.
1622 *
1623 */
1624void
1625iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1626{
1627 struct traffic_stats *stats;
1628
1629 if (is_tx)
1630 stats = &priv->tx_stats;
1631 else
1632 stats = &priv->rx_stats;
1633
1634 if (ieee80211_is_mgmt(fc)) {
1635 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1636 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
1637 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
1638 break;
1639 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
1640 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
1641 break;
1642 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
1643 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
1644 break;
1645 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
1646 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
1647 break;
1648 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
1649 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
1650 break;
1651 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
1652 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
1653 break;
1654 case cpu_to_le16(IEEE80211_STYPE_BEACON):
1655 stats->mgmt[MANAGEMENT_BEACON]++;
1656 break;
1657 case cpu_to_le16(IEEE80211_STYPE_ATIM):
1658 stats->mgmt[MANAGEMENT_ATIM]++;
1659 break;
1660 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
1661 stats->mgmt[MANAGEMENT_DISASSOC]++;
1662 break;
1663 case cpu_to_le16(IEEE80211_STYPE_AUTH):
1664 stats->mgmt[MANAGEMENT_AUTH]++;
1665 break;
1666 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
1667 stats->mgmt[MANAGEMENT_DEAUTH]++;
1668 break;
1669 case cpu_to_le16(IEEE80211_STYPE_ACTION):
1670 stats->mgmt[MANAGEMENT_ACTION]++;
1671 break;
1672 }
1673 } else if (ieee80211_is_ctl(fc)) {
1674 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1675 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
1676 stats->ctrl[CONTROL_BACK_REQ]++;
1677 break;
1678 case cpu_to_le16(IEEE80211_STYPE_BACK):
1679 stats->ctrl[CONTROL_BACK]++;
1680 break;
1681 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
1682 stats->ctrl[CONTROL_PSPOLL]++;
1683 break;
1684 case cpu_to_le16(IEEE80211_STYPE_RTS):
1685 stats->ctrl[CONTROL_RTS]++;
1686 break;
1687 case cpu_to_le16(IEEE80211_STYPE_CTS):
1688 stats->ctrl[CONTROL_CTS]++;
1689 break;
1690 case cpu_to_le16(IEEE80211_STYPE_ACK):
1691 stats->ctrl[CONTROL_ACK]++;
1692 break;
1693 case cpu_to_le16(IEEE80211_STYPE_CFEND):
1694 stats->ctrl[CONTROL_CFEND]++;
1695 break;
1696 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
1697 stats->ctrl[CONTROL_CFENDACK]++;
1698 break;
1699 }
1700 } else {
1701 /* data */
1702 stats->data_cnt++;
1703 stats->data_bytes += len;
1704 }
1705}
1706EXPORT_SYMBOL(iwl_legacy_update_stats);
1707#endif
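/*
 * Minimal standalone sketch (illustration only, not the driver code): the
 * switch statements in iwl_legacy_update_stats() above compare the frame
 * control field while it is still little-endian, since both the mask and the
 * case constants are wrapped in cpu_to_le16(); no per-frame byte swap is
 * needed on big-endian hosts.  The constants below spell out the 802.11
 * subtype mask and the beacon subtype; all *_sketch names are made up.
 */
#include <stdint.h>

#define FCTL_STYPE_SKETCH   0x00f0	/* frame-control subtype bits */
#define STYPE_BEACON_SKETCH 0x0080	/* beacon subtype (within mgmt type) */

/* stand-in for cpu_to_le16(): identity on little-endian, swap otherwise */
static uint16_t to_le16_sketch(uint16_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (uint16_t)((v << 8) | (v >> 8));
#else
	return v;
#endif
}

/* assumes the caller already checked the frame type, as ieee80211_is_mgmt()
 * does above; this only demonstrates the subtype comparison */
static int stype_is_beacon_sketch(uint16_t fc_le)
{
	/* both sides are constant-folded; fc_le itself is never swapped */
	return (fc_le & to_le16_sketch(FCTL_STYPE_SKETCH)) ==
	       to_le16_sketch(STYPE_BEACON_SKETCH);
}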
1708
1709int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
1710{
1711 struct iwl_force_reset *force_reset;
1712
1713 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1714 return -EINVAL;
1715
1716 force_reset = &priv->force_reset;
1717 force_reset->reset_request_count++;
1718 if (!external) {
1719 if (force_reset->last_force_reset_jiffies &&
1720 time_after(force_reset->last_force_reset_jiffies +
1721 force_reset->reset_duration, jiffies)) {
1722 IWL_DEBUG_INFO(priv, "force reset rejected\n");
1723 force_reset->reset_reject_count++;
1724 return -EAGAIN;
1725 }
1726 }
1727 force_reset->reset_success_count++;
1728 force_reset->last_force_reset_jiffies = jiffies;
1729
1730	/*
1731	 * If the request comes from outside the driver (e.g. debugfs),
1732	 * always perform it, regardless of the module
1733	 * parameter setting.
1734	 * If the request is internal (uCode error or a failure detected
1735	 * by the driver), the fw_restart module parameter
1736	 * must be checked before reloading the firmware.
1737	 */
1738
1739 if (!external && !priv->cfg->mod_params->restart_fw) {
1740 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
1741 "module parameter setting\n");
1742 return 0;
1743 }
1744
1745 IWL_ERR(priv, "On demand firmware reload\n");
1746
1747 /* Set the FW error flag -- cleared on iwl_down */
1748 set_bit(STATUS_FW_ERROR, &priv->status);
1749 wake_up(&priv->wait_command_queue);
1750 /*
1751 * Keep the restart process from trying to send host
1752 * commands by clearing the INIT status bit
1753 */
1754 clear_bit(STATUS_READY, &priv->status);
1755 queue_work(priv->workqueue, &priv->restart);
1756
1757 return 0;
1758}
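/*
 * Minimal standalone sketch (illustration only, not the driver code): the
 * internal-reset throttle in iwl_legacy_force_reset() above is jiffies
 * arithmetic -- reject a new internal request while last_force_reset_jiffies
 * plus reset_duration is still in the future, while external (debugfs)
 * requests bypass the check.  A plain monotonic tick counter stands in for
 * jiffies/time_after(); all *_sketch names are made up.
 */
#include <stdbool.h>
#include <stdint.h>

struct reset_throttle_sketch {
	uint64_t last_tick;	/* 0 = no reset has happened yet */
	uint64_t duration;	/* minimum ticks between internal resets */
};

static bool reset_allowed_sketch(struct reset_throttle_sketch *t,
				 uint64_t now, bool external)
{
	if (!external && t->last_tick && now < t->last_tick + t->duration)
		return false;	/* too soon: mirrors the -EAGAIN path */

	t->last_tick = now;	/* accept and remember when it happened */
	return true;
}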
1759
1760int
1761iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
1762 struct ieee80211_vif *vif,
1763 enum nl80211_iftype newtype, bool newp2p)
1764{
1765 struct iwl_priv *priv = hw->priv;
1766 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1767 struct iwl_rxon_context *tmp;
1768 u32 interface_modes;
1769 int err;
1770
1771 newtype = ieee80211_iftype_p2p(newtype, newp2p);
1772
1773 mutex_lock(&priv->mutex);
1774
1775 if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
1776 /*
1777 * Huh? But wait ... this can maybe happen when
1778 * we're in the middle of a firmware restart!
1779 */
1780 err = -EBUSY;
1781 goto out;
1782 }
1783
1784 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1785
1786 if (!(interface_modes & BIT(newtype))) {
1787 err = -EBUSY;
1788 goto out;
1789 }
1790
1791 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1792 for_each_context(priv, tmp) {
1793 if (ctx == tmp)
1794 continue;
1795
1796 if (!tmp->vif)
1797 continue;
1798
1799 /*
1800 * The current mode switch would be exclusive, but
1801 * another context is active ... refuse the switch.
1802 */
1803 err = -EBUSY;
1804 goto out;
1805 }
1806 }
1807
1808 /* success */
1809 iwl_legacy_teardown_interface(priv, vif, true);
1810 vif->type = newtype;
1811 vif->p2p = newp2p;
1812 err = iwl_legacy_setup_interface(priv, ctx);
1813 WARN_ON(err);
1814 /*
1815 * We've switched internally, but submitting to the
1816 * device may have failed for some reason. Mask this
1817 * error, because otherwise mac80211 will not switch
1818 * (and set the interface type back) and we'll be
1819 * out of sync with it.
1820 */
1821 err = 0;
1822
1823 out:
1824 mutex_unlock(&priv->mutex);
1825 return err;
1826}
1827EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
1828
1829/*
1830 * On every watchdog tick we check the queue's latest time stamp. If it has
1831 * not changed within the timeout period and the queue is not empty, we reset the firmware.
1832 */
1833static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
1834{
1835 struct iwl_tx_queue *txq = &priv->txq[cnt];
1836 struct iwl_queue *q = &txq->q;
1837 unsigned long timeout;
1838 int ret;
1839
1840 if (q->read_ptr == q->write_ptr) {
1841 txq->time_stamp = jiffies;
1842 return 0;
1843 }
1844
1845 timeout = txq->time_stamp +
1846 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1847
1848 if (time_after(jiffies, timeout)) {
1849 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1850 q->id, priv->cfg->base_params->wd_timeout);
1851 ret = iwl_legacy_force_reset(priv, false);
1852 return (ret == -EAGAIN) ? 0 : 1;
1853 }
1854
1855 return 0;
1856}
1857
1858/*
1859 * Making the watchdog tick a quarter of the timeout ensures that a hung
1860 * queue is discovered between timeout and 1.25*timeout after it stalls
1861 */
1862#define IWL_WD_TICK(timeout) ((timeout) / 4)
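/*
 * Worked example (illustration only, not the driver code): with the tick at
 * timeout/4, a queue that stalls right after one check is flagged no earlier
 * than "timeout" and no later than "timeout + tick" after its last activity.
 * The wd_timeout value below is an assumed example, not a driver default.
 */
#include <stdio.h>

int main(void)
{
	unsigned int wd_timeout_ms = 2000;		/* example value only */
	unsigned int tick = wd_timeout_ms / 4;		/* as IWL_WD_TICK() */

	/* a stall is detected somewhere in (timeout, timeout + tick] */
	printf("tick %u ms, detection window %u..%u ms (1.25 * timeout)\n",
	       tick, wd_timeout_ms, wd_timeout_ms + tick);
	return 0;
}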
1863
1864/*
1865 * Watchdog timer callback: we check each tx queue for a stall and, if hung,
1866 * we reset the firmware. If everything is fine, just rearm the timer.
1867 */
1868void iwl_legacy_bg_watchdog(unsigned long data)
1869{
1870 struct iwl_priv *priv = (struct iwl_priv *)data;
1871 int cnt;
1872 unsigned long timeout;
1873
1874 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1875 return;
1876
1877 timeout = priv->cfg->base_params->wd_timeout;
1878 if (timeout == 0)
1879 return;
1880
1881 /* monitor and check for stuck cmd queue */
1882 if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
1883 return;
1884
1885 /* monitor and check for other stuck queues */
1886 if (iwl_legacy_is_any_associated(priv)) {
1887 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1888 /* skip as we already checked the command queue */
1889 if (cnt == priv->cmd_queue)
1890 continue;
1891 if (iwl_legacy_check_stuck_queue(priv, cnt))
1892 return;
1893 }
1894 }
1895
1896 mod_timer(&priv->watchdog, jiffies +
1897 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1898}
1899EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
1900
1901void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
1902{
1903 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1904
1905 if (timeout)
1906 mod_timer(&priv->watchdog,
1907 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1908 else
1909 del_timer(&priv->watchdog);
1910}
1911EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
1912
1913/*
1914 * extended beacon time format:
1915 * a time in usec is converted into a 32-bit value in extended:internal format,
1916 * where the extended part is the beacon count
1917 * and the internal part is the time in usec within one beacon interval
1918 */
1919u32
1920iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
1921 u32 usec, u32 beacon_interval)
1922{
1923 u32 quot;
1924 u32 rem;
1925 u32 interval = beacon_interval * TIME_UNIT;
1926
1927 if (!interval || !usec)
1928 return 0;
1929
1930 quot = (usec / interval) &
1931 (iwl_legacy_beacon_time_mask_high(priv,
1932 priv->hw_params.beacon_time_tsf_bits) >>
1933 priv->hw_params.beacon_time_tsf_bits);
1934 rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
1935 priv->hw_params.beacon_time_tsf_bits);
1936
1937 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
1938}
1939EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
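/*
 * Minimal standalone sketch (illustration only, not the driver code): the
 * extended:internal split performed by iwl_legacy_usecs_to_beacons() above.
 * The low-bit width really comes from hw_params.beacon_time_tsf_bits; 22 bits
 * are assumed here, TIME_UNIT is 1024 usec as in iwl-core.h, and the clamping
 * of the quotient to the remaining high bits is omitted for brevity.
 */
#include <stdint.h>

#define TSF_BITS_SKETCH  22
#define TIME_UNIT_SKETCH 1024

static uint32_t usecs_to_beacons_sketch(uint32_t usec, uint32_t beacon_interval)
{
	uint32_t interval = beacon_interval * TIME_UNIT_SKETCH;
	uint32_t low_mask = (1u << TSF_BITS_SKETCH) - 1;

	if (!interval || !usec)
		return 0;

	/* high part: whole beacon intervals elapsed;
	 * low part: usec remainder inside the current interval */
	return ((usec / interval) << TSF_BITS_SKETCH) |
	       ((usec % interval) & low_mask);
}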
1940
1941/* base is usually what we get from the uCode with each received frame,
1942 * i.e. the HW timer counter, which counts down
1943 */
1944__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
1945 u32 addon, u32 beacon_interval)
1946{
1947 u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
1948 priv->hw_params.beacon_time_tsf_bits);
1949 u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
1950 priv->hw_params.beacon_time_tsf_bits);
1951 u32 interval = beacon_interval * TIME_UNIT;
1952 u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
1953 priv->hw_params.beacon_time_tsf_bits)) +
1954 (addon & iwl_legacy_beacon_time_mask_high(priv,
1955 priv->hw_params.beacon_time_tsf_bits));
1956
1957 if (base_low > addon_low)
1958 res += base_low - addon_low;
1959 else if (base_low < addon_low) {
1960 res += interval + base_low - addon_low;
1961 res += (1 << priv->hw_params.beacon_time_tsf_bits);
1962 } else
1963 res += (1 << priv->hw_params.beacon_time_tsf_bits);
1964
1965 return cpu_to_le32(res);
1966}
1967EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
1968
1969#ifdef CONFIG_PM
1970
1971int iwl_legacy_pci_suspend(struct device *device)
1972{
1973 struct pci_dev *pdev = to_pci_dev(device);
1974 struct iwl_priv *priv = pci_get_drvdata(pdev);
1975
1976 /*
1977	 * This function is called when the system goes into the suspend state.
1978	 * mac80211 calls iwl_mac_stop() from its own suspend path
1979	 * first, but since iwl_mac_stop() has no knowledge of who the caller is,
1980	 * it will not call apm_ops.stop() to stop the DMA operation.
1981	 * Call apm_ops.stop() here to make sure the DMA is stopped.
1982 */
1983 iwl_legacy_apm_stop(priv);
1984
1985 return 0;
1986}
1987EXPORT_SYMBOL(iwl_legacy_pci_suspend);
1988
1989int iwl_legacy_pci_resume(struct device *device)
1990{
1991 struct pci_dev *pdev = to_pci_dev(device);
1992 struct iwl_priv *priv = pci_get_drvdata(pdev);
1993 bool hw_rfkill = false;
1994
1995 /*
1996 * We disable the RETRY_TIMEOUT register (0x41) to keep
1997 * PCI Tx retries from interfering with C3 CPU state.
1998 */
1999 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2000
2001 iwl_legacy_enable_interrupts(priv);
2002
2003 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2004 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2005 hw_rfkill = true;
2006
2007 if (hw_rfkill)
2008 set_bit(STATUS_RF_KILL_HW, &priv->status);
2009 else
2010 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2011
2012 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2013
2014 return 0;
2015}
2016EXPORT_SYMBOL(iwl_legacy_pci_resume);
2017
2018const struct dev_pm_ops iwl_legacy_pm_ops = {
2019 .suspend = iwl_legacy_pci_suspend,
2020 .resume = iwl_legacy_pci_resume,
2021 .freeze = iwl_legacy_pci_suspend,
2022 .thaw = iwl_legacy_pci_resume,
2023 .poweroff = iwl_legacy_pci_suspend,
2024 .restore = iwl_legacy_pci_resume,
2025};
2026EXPORT_SYMBOL(iwl_legacy_pm_ops);
2027
2028#endif /* CONFIG_PM */
2029
2030static void
2031iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
2032{
2033 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2034 return;
2035
2036 if (!ctx->is_active)
2037 return;
2038
2039 ctx->qos_data.def_qos_parm.qos_flags = 0;
2040
2041 if (ctx->qos_data.qos_active)
2042 ctx->qos_data.def_qos_parm.qos_flags |=
2043 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2044
2045 if (ctx->ht.enabled)
2046 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
2047
2048 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2049 ctx->qos_data.qos_active,
2050 ctx->qos_data.def_qos_parm.qos_flags);
2051
2052 iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
2053 sizeof(struct iwl_qosparam_cmd),
2054 &ctx->qos_data.def_qos_parm, NULL);
2055}
2056
2057/**
2058 * iwl_legacy_mac_config - mac80211 config callback
2059 */
2060int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
2061{
2062 struct iwl_priv *priv = hw->priv;
2063 const struct iwl_channel_info *ch_info;
2064 struct ieee80211_conf *conf = &hw->conf;
2065 struct ieee80211_channel *channel = conf->channel;
2066 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2067 struct iwl_rxon_context *ctx;
2068 unsigned long flags = 0;
2069 int ret = 0;
2070 u16 ch;
2071 int scan_active = 0;
2072 bool ht_changed[NUM_IWL_RXON_CTX] = {};
2073
2074 if (WARN_ON(!priv->cfg->ops->legacy))
2075 return -EOPNOTSUPP;
2076
2077 mutex_lock(&priv->mutex);
2078
2079 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2080 channel->hw_value, changed);
2081
2082 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
2083 scan_active = 1;
2084 IWL_DEBUG_MAC80211(priv, "scan active\n");
2085 }
2086
2087 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2088 IEEE80211_CONF_CHANGE_CHANNEL)) {
2089 /* mac80211 uses static for non-HT which is what we want */
2090 priv->current_ht_config.smps = conf->smps_mode;
2091
2092 /*
2093 * Recalculate chain counts.
2094 *
2095 * If monitor mode is enabled then mac80211 will
2096 * set up the SM PS mode to OFF if an HT channel is
2097 * configured.
2098 */
2099 if (priv->cfg->ops->hcmd->set_rxon_chain)
2100 for_each_context(priv, ctx)
2101 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2102 }
2103
2104	/* while scanning, mac80211 delays the channel setting until the
2105	 * scan finishes, then calls back with changed = 0
2106 */
2107 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2108 if (scan_active)
2109 goto set_ch_out;
2110
2111 ch = channel->hw_value;
2112 ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
2113 if (!iwl_legacy_is_channel_valid(ch_info)) {
2114 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2115 ret = -EINVAL;
2116 goto set_ch_out;
2117 }
2118
2119 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2120 !iwl_legacy_is_channel_ibss(ch_info)) {
2121 IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
2122 ret = -EINVAL;
2123 goto set_ch_out;
2124 }
2125
2126 spin_lock_irqsave(&priv->lock, flags);
2127
2128 for_each_context(priv, ctx) {
2129 /* Configure HT40 channels */
2130 if (ctx->ht.enabled != conf_is_ht(conf)) {
2131 ctx->ht.enabled = conf_is_ht(conf);
2132 ht_changed[ctx->ctxid] = true;
2133 }
2134 if (ctx->ht.enabled) {
2135 if (conf_is_ht40_minus(conf)) {
2136 ctx->ht.extension_chan_offset =
2137 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2138 ctx->ht.is_40mhz = true;
2139 } else if (conf_is_ht40_plus(conf)) {
2140 ctx->ht.extension_chan_offset =
2141 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2142 ctx->ht.is_40mhz = true;
2143 } else {
2144 ctx->ht.extension_chan_offset =
2145 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2146 ctx->ht.is_40mhz = false;
2147 }
2148 } else
2149 ctx->ht.is_40mhz = false;
2150
2151 /*
2152 * Default to no protection. Protection mode will
2153 * later be set from BSS config in iwl_ht_conf
2154 */
2155 ctx->ht.protection =
2156 IEEE80211_HT_OP_MODE_PROTECTION_NONE;
2157
2158			/* if we are switching from HT to 2.4 GHz, clear flags
2159			 * from any HT-related info, since 2.4 GHz does not
2160			 * support HT */
2161 if ((le16_to_cpu(ctx->staging.channel) != ch))
2162 ctx->staging.flags = 0;
2163
2164 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2165 iwl_legacy_set_rxon_ht(priv, ht_conf);
2166
2167 iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
2168 ctx->vif);
2169 }
2170
2171 spin_unlock_irqrestore(&priv->lock, flags);
2172
2173 if (priv->cfg->ops->legacy->update_bcast_stations)
2174 ret =
2175 priv->cfg->ops->legacy->update_bcast_stations(priv);
2176
2177 set_ch_out:
2178 /* The list of supported rates and rate mask can be different
2179 * for each band; since the band may have changed, reset
2180 * the rate mask to what mac80211 lists */
2181 iwl_legacy_set_rate(priv);
2182 }
2183
2184 if (changed & (IEEE80211_CONF_CHANGE_PS |
2185 IEEE80211_CONF_CHANGE_IDLE)) {
2186 ret = iwl_legacy_power_update_mode(priv, false);
2187 if (ret)
2188 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2189 }
2190
2191 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2192 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2193 priv->tx_power_user_lmt, conf->power_level);
2194
2195 iwl_legacy_set_tx_power(priv, conf->power_level, false);
2196 }
2197
2198 if (!iwl_legacy_is_ready(priv)) {
2199 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2200 goto out;
2201 }
2202
2203 if (scan_active)
2204 goto out;
2205
2206 for_each_context(priv, ctx) {
2207 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
2208 iwl_legacy_commit_rxon(priv, ctx);
2209 else
2210 IWL_DEBUG_INFO(priv,
2211 "Not re-sending same RXON configuration.\n");
2212 if (ht_changed[ctx->ctxid])
2213 iwl_legacy_update_qos(priv, ctx);
2214 }
2215
2216out:
2217 IWL_DEBUG_MAC80211(priv, "leave\n");
2218 mutex_unlock(&priv->mutex);
2219 return ret;
2220}
2221EXPORT_SYMBOL(iwl_legacy_mac_config);
2222
2223void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
2224{
2225 struct iwl_priv *priv = hw->priv;
2226 unsigned long flags;
2227 /* IBSS can only be the IWL_RXON_CTX_BSS context */
2228 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2229
2230 if (WARN_ON(!priv->cfg->ops->legacy))
2231 return;
2232
2233 mutex_lock(&priv->mutex);
2234 IWL_DEBUG_MAC80211(priv, "enter\n");
2235
2236 spin_lock_irqsave(&priv->lock, flags);
2237 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2238 spin_unlock_irqrestore(&priv->lock, flags);
2239
2240 spin_lock_irqsave(&priv->lock, flags);
2241
2242 /* new association get rid of ibss beacon skb */
2243 if (priv->beacon_skb)
2244 dev_kfree_skb(priv->beacon_skb);
2245
2246 priv->beacon_skb = NULL;
2247
2248 priv->timestamp = 0;
2249
2250 spin_unlock_irqrestore(&priv->lock, flags);
2251
2252 iwl_legacy_scan_cancel_timeout(priv, 100);
2253 if (!iwl_legacy_is_ready_rf(priv)) {
2254 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2255 mutex_unlock(&priv->mutex);
2256 return;
2257 }
2258
2259 /* we are restarting association process
2260 * clear RXON_FILTER_ASSOC_MSK bit
2261 */
2262 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2263 iwl_legacy_commit_rxon(priv, ctx);
2264
2265 iwl_legacy_set_rate(priv);
2266
2267 mutex_unlock(&priv->mutex);
2268
2269 IWL_DEBUG_MAC80211(priv, "leave\n");
2270}
2271EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
2272
2273static void iwl_legacy_ht_conf(struct iwl_priv *priv,
2274 struct ieee80211_vif *vif)
2275{
2276 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2277 struct ieee80211_sta *sta;
2278 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
2279 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2280
2281 IWL_DEBUG_ASSOC(priv, "enter:\n");
2282
2283 if (!ctx->ht.enabled)
2284 return;
2285
2286 ctx->ht.protection =
2287 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2288 ctx->ht.non_gf_sta_present =
2289 !!(bss_conf->ht_operation_mode &
2290 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2291
2292 ht_conf->single_chain_sufficient = false;
2293
2294 switch (vif->type) {
2295 case NL80211_IFTYPE_STATION:
2296 rcu_read_lock();
2297 sta = ieee80211_find_sta(vif, bss_conf->bssid);
2298 if (sta) {
2299 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2300 int maxstreams;
2301
2302 maxstreams = (ht_cap->mcs.tx_params &
2303 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2304 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2305 maxstreams += 1;
2306
2307 if ((ht_cap->mcs.rx_mask[1] == 0) &&
2308 (ht_cap->mcs.rx_mask[2] == 0))
2309 ht_conf->single_chain_sufficient = true;
2310 if (maxstreams <= 1)
2311 ht_conf->single_chain_sufficient = true;
2312 } else {
2313 /*
2314 * If at all, this can only happen through a race
2315 * when the AP disconnects us while we're still
2316			 * setting up the connection; in that case mac80211
2317			 * will soon tell us about it.
2318 */
2319 ht_conf->single_chain_sufficient = true;
2320 }
2321 rcu_read_unlock();
2322 break;
2323 case NL80211_IFTYPE_ADHOC:
2324 ht_conf->single_chain_sufficient = true;
2325 break;
2326 default:
2327 break;
2328 }
2329
2330 IWL_DEBUG_ASSOC(priv, "leave\n");
2331}
2332
2333static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
2334 struct ieee80211_vif *vif)
2335{
2336 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2337
2338 /*
2339 * inform the ucode that there is no longer an
2340 * association and that no more packets should be
2341 * sent
2342 */
2343 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2344 ctx->staging.assoc_id = 0;
2345 iwl_legacy_commit_rxon(priv, ctx);
2346}
2347
2348static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
2349 struct ieee80211_vif *vif)
2350{
2351 struct iwl_priv *priv = hw->priv;
2352 unsigned long flags;
2353 __le64 timestamp;
2354 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
2355
2356 if (!skb)
2357 return;
2358
2359 IWL_DEBUG_MAC80211(priv, "enter\n");
2360
2361 lockdep_assert_held(&priv->mutex);
2362
2363 if (!priv->beacon_ctx) {
2364 IWL_ERR(priv, "update beacon but no beacon context!\n");
2365 dev_kfree_skb(skb);
2366 return;
2367 }
2368
2369 spin_lock_irqsave(&priv->lock, flags);
2370
2371 if (priv->beacon_skb)
2372 dev_kfree_skb(priv->beacon_skb);
2373
2374 priv->beacon_skb = skb;
2375
2376 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2377 priv->timestamp = le64_to_cpu(timestamp);
2378
2379 IWL_DEBUG_MAC80211(priv, "leave\n");
2380 spin_unlock_irqrestore(&priv->lock, flags);
2381
2382 if (!iwl_legacy_is_ready_rf(priv)) {
2383 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2384 return;
2385 }
2386
2387 priv->cfg->ops->legacy->post_associate(priv);
2388}
2389
2390void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
2391 struct ieee80211_vif *vif,
2392 struct ieee80211_bss_conf *bss_conf,
2393 u32 changes)
2394{
2395 struct iwl_priv *priv = hw->priv;
2396 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2397 int ret;
2398
2399 if (WARN_ON(!priv->cfg->ops->legacy))
2400 return;
2401
2402 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
2403
2404 mutex_lock(&priv->mutex);
2405
2406 if (!iwl_legacy_is_alive(priv)) {
2407 mutex_unlock(&priv->mutex);
2408 return;
2409 }
2410
2411 if (changes & BSS_CHANGED_QOS) {
2412 unsigned long flags;
2413
2414 spin_lock_irqsave(&priv->lock, flags);
2415 ctx->qos_data.qos_active = bss_conf->qos;
2416 iwl_legacy_update_qos(priv, ctx);
2417 spin_unlock_irqrestore(&priv->lock, flags);
2418 }
2419
2420 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2421 /*
2422 * the add_interface code must make sure we only ever
2423 * have a single interface that could be beaconing at
2424 * any time.
2425 */
2426 if (vif->bss_conf.enable_beacon)
2427 priv->beacon_ctx = ctx;
2428 else
2429 priv->beacon_ctx = NULL;
2430 }
2431
2432 if (changes & BSS_CHANGED_BSSID) {
2433 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
2434
2435 /*
2436 * If there is currently a HW scan going on in the
2437 * background then we need to cancel it else the RXON
2438 * below/in post_associate will fail.
2439 */
2440 if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
2441 IWL_WARN(priv,
2442 "Aborted scan still in progress after 100ms\n");
2443 IWL_DEBUG_MAC80211(priv,
2444 "leaving - scan abort failed.\n");
2445 mutex_unlock(&priv->mutex);
2446 return;
2447 }
2448
2449 /* mac80211 only sets assoc when in STATION mode */
2450 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
2451 memcpy(ctx->staging.bssid_addr,
2452 bss_conf->bssid, ETH_ALEN);
2453
2454 /* currently needed in a few places */
2455 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2456 } else {
2457 ctx->staging.filter_flags &=
2458 ~RXON_FILTER_ASSOC_MSK;
2459 }
2460
2461 }
2462
2463 /*
2464 * This needs to be after setting the BSSID in case
2465 * mac80211 decides to do both changes at once because
2466 * it will invoke post_associate.
2467 */
2468 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
2469 iwl_legacy_beacon_update(hw, vif);
2470
2471 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
2472 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
2473 bss_conf->use_short_preamble);
2474 if (bss_conf->use_short_preamble)
2475 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2476 else
2477 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2478 }
2479
2480 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
2481 IWL_DEBUG_MAC80211(priv,
2482 "ERP_CTS %d\n", bss_conf->use_cts_prot);
2483 if (bss_conf->use_cts_prot &&
2484 (priv->band != IEEE80211_BAND_5GHZ))
2485 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
2486 else
2487 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
2488 if (bss_conf->use_cts_prot)
2489 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
2490 else
2491 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
2492 }
2493
2494 if (changes & BSS_CHANGED_BASIC_RATES) {
2495 /* XXX use this information
2496 *
2497 * To do that, remove code from iwl_legacy_set_rate() and put something
2498 * like this here:
2499 *
2500 if (A-band)
2501 ctx->staging.ofdm_basic_rates =
2502 bss_conf->basic_rates;
2503 else
2504 ctx->staging.ofdm_basic_rates =
2505 bss_conf->basic_rates >> 4;
2506 ctx->staging.cck_basic_rates =
2507 bss_conf->basic_rates & 0xF;
2508 */
2509 }
2510
2511 if (changes & BSS_CHANGED_HT) {
2512 iwl_legacy_ht_conf(priv, vif);
2513
2514 if (priv->cfg->ops->hcmd->set_rxon_chain)
2515 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2516 }
2517
2518 if (changes & BSS_CHANGED_ASSOC) {
2519 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
2520 if (bss_conf->assoc) {
2521 priv->timestamp = bss_conf->timestamp;
2522
2523 if (!iwl_legacy_is_rfkill(priv))
2524 priv->cfg->ops->legacy->post_associate(priv);
2525 } else
2526 iwl_legacy_set_no_assoc(priv, vif);
2527 }
2528
2529 if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
2530 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
2531 changes);
2532 ret = iwl_legacy_send_rxon_assoc(priv, ctx);
2533 if (!ret) {
2534 /* Sync active_rxon with latest change. */
2535 memcpy((void *)&ctx->active,
2536 &ctx->staging,
2537 sizeof(struct iwl_legacy_rxon_cmd));
2538 }
2539 }
2540
2541 if (changes & BSS_CHANGED_BEACON_ENABLED) {
2542 if (vif->bss_conf.enable_beacon) {
2543 memcpy(ctx->staging.bssid_addr,
2544 bss_conf->bssid, ETH_ALEN);
2545 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2546 priv->cfg->ops->legacy->config_ap(priv);
2547 } else
2548 iwl_legacy_set_no_assoc(priv, vif);
2549 }
2550
2551 if (changes & BSS_CHANGED_IBSS) {
2552 ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
2553 bss_conf->ibss_joined);
2554 if (ret)
2555 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
2556 bss_conf->ibss_joined ? "add" : "remove",
2557 bss_conf->bssid);
2558 }
2559
2560 mutex_unlock(&priv->mutex);
2561
2562 IWL_DEBUG_MAC80211(priv, "leave\n");
2563}
2564EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
2565
2566irqreturn_t iwl_legacy_isr(int irq, void *data)
2567{
2568 struct iwl_priv *priv = data;
2569 u32 inta, inta_mask;
2570 u32 inta_fh;
2571 unsigned long flags;
2572 if (!priv)
2573 return IRQ_NONE;
2574
2575 spin_lock_irqsave(&priv->lock, flags);
2576
2577 /* Disable (but don't clear!) interrupts here to avoid
2578 * back-to-back ISRs and sporadic interrupts from our NIC.
2579 * If we have something to service, the tasklet will re-enable ints.
2580 * If we *don't* have something, we'll re-enable before leaving here. */
2581 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
2582 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
2583
2584 /* Discover which interrupts are active/pending */
2585 inta = iwl_read32(priv, CSR_INT);
2586 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
2587
2588 /* Ignore interrupt if there's nothing in NIC to service.
2589 * This may be due to IRQ shared with another device,
2590 * or due to sporadic interrupts thrown from our NIC. */
2591 if (!inta && !inta_fh) {
2592 IWL_DEBUG_ISR(priv,
2593 "Ignore interrupt, inta == 0, inta_fh == 0\n");
2594 goto none;
2595 }
2596
2597 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
2598 /* Hardware disappeared. It might have already raised
2599 * an interrupt */
2600 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
2601 goto unplugged;
2602 }
2603
2604 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
2605 inta, inta_mask, inta_fh);
2606
2607 inta &= ~CSR_INT_BIT_SCD;
2608
2609 /* iwl_irq_tasklet() will service interrupts and re-enable them */
2610 if (likely(inta || inta_fh))
2611 tasklet_schedule(&priv->irq_tasklet);
2612
2613unplugged:
2614 spin_unlock_irqrestore(&priv->lock, flags);
2615 return IRQ_HANDLED;
2616
2617none:
2618 /* re-enable interrupts here since we don't have anything to service. */
2619	/* only re-enable if interrupts were disabled by the irq handler */
2620 if (test_bit(STATUS_INT_ENABLED, &priv->status))
2621 iwl_legacy_enable_interrupts(priv);
2622 spin_unlock_irqrestore(&priv->lock, flags);
2623 return IRQ_NONE;
2624}
2625EXPORT_SYMBOL(iwl_legacy_isr);
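/*
 * Minimal standalone sketch (illustration only, not the driver code): the
 * triage order used by iwl_legacy_isr() above, reduced to a pure function.
 * "Not ours" when both status words are zero (shared IRQ fired for another
 * device), "card gone" when the bus returns all-ones or the 0xa5a5a5a0
 * pattern, otherwise defer the real work; all *_sketch names are made up.
 */
#include <stdint.h>

enum isr_verdict_sketch { ISR_NONE_SKETCH, ISR_HW_GONE_SKETCH, ISR_DEFER_SKETCH };

static enum isr_verdict_sketch isr_triage_sketch(uint32_t inta, uint32_t inta_fh)
{
	if (!inta && !inta_fh)
		return ISR_NONE_SKETCH;		/* nothing in the NIC to service */

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)
		return ISR_HW_GONE_SKETCH;	/* reads float when the card vanished */

	return ISR_DEFER_SKETCH;		/* schedule the tasklet, as above */
}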
2626
2627/*
2628 * iwl_legacy_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
2629 * function.
2630 */
2631void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
2632 struct ieee80211_tx_info *info,
2633 __le16 fc, __le32 *tx_flags)
2634{
2635 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2636 *tx_flags |= TX_CMD_FLG_RTS_MSK;
2637 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2638 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2639
2640 if (!ieee80211_is_mgmt(fc))
2641 return;
2642
2643 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2644 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2645 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2646 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2647 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2648 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2649 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2650 break;
2651 }
2652 } else if (info->control.rates[0].flags &
2653 IEEE80211_TX_RC_USE_CTS_PROTECT) {
2654 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2655 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2656 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2657 }
2658}
2659EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
new file mode 100644
index 00000000000..84da79376ef
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -0,0 +1,634 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_core_h__
64#define __iwl_legacy_core_h__
65
66/************************
67 * forward declarations *
68 ************************/
69struct iwl_host_cmd;
70struct iwl_cmd;
71
72
73#define IWLWIFI_VERSION "in-tree:"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>"
76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg)
81
82#define TIME_UNIT 1024
83
84#define IWL_SKU_G 0x1
85#define IWL_SKU_A 0x2
86#define IWL_SKU_N 0x8
87
88#define IWL_CMD(x) case x: return #x
89
90struct iwl_hcmd_ops {
91 int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
92 int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
93 void (*set_rxon_chain)(struct iwl_priv *priv,
94 struct iwl_rxon_context *ctx);
95};
96
97struct iwl_hcmd_utils_ops {
98 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
99 u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
100 u8 *data);
101 int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
102 void (*post_scan)(struct iwl_priv *priv);
103};
104
105struct iwl_apm_ops {
106 int (*init)(struct iwl_priv *priv);
107 void (*config)(struct iwl_priv *priv);
108};
109
110struct iwl_debugfs_ops {
111 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
112 size_t count, loff_t *ppos);
113 ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
114 size_t count, loff_t *ppos);
115 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
116 size_t count, loff_t *ppos);
117};
118
119struct iwl_temp_ops {
120 void (*temperature)(struct iwl_priv *priv);
121};
122
123struct iwl_lib_ops {
124 /* set hw dependent parameters */
125 int (*set_hw_params)(struct iwl_priv *priv);
126 /* Handling TX */
127 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
128 struct iwl_tx_queue *txq,
129 u16 byte_cnt);
130 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
131 struct iwl_tx_queue *txq,
132 dma_addr_t addr,
133 u16 len, u8 reset, u8 pad);
134 void (*txq_free_tfd)(struct iwl_priv *priv,
135 struct iwl_tx_queue *txq);
136 int (*txq_init)(struct iwl_priv *priv,
137 struct iwl_tx_queue *txq);
138 /* setup Rx handler */
139 void (*rx_handler_setup)(struct iwl_priv *priv);
140 /* alive notification after init uCode load */
141 void (*init_alive_start)(struct iwl_priv *priv);
142 /* check validity of rtc data address */
143 int (*is_valid_rtc_data_addr)(u32 addr);
144 /* 1st ucode load */
145 int (*load_ucode)(struct iwl_priv *priv);
146
147 void (*dump_nic_error_log)(struct iwl_priv *priv);
148 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
149 int (*set_channel_switch)(struct iwl_priv *priv,
150 struct ieee80211_channel_switch *ch_switch);
151 /* power management */
152 struct iwl_apm_ops apm_ops;
153
154 /* power */
155 int (*send_tx_power) (struct iwl_priv *priv);
156 void (*update_chain_flags)(struct iwl_priv *priv);
157
158 /* eeprom operations (as defined in iwl-eeprom.h) */
159 struct iwl_eeprom_ops eeprom_ops;
160
161 /* temperature */
162 struct iwl_temp_ops temp_ops;
163
164 struct iwl_debugfs_ops debugfs_ops;
165
166};
167
168struct iwl_led_ops {
169 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
170};
171
172struct iwl_legacy_ops {
173 void (*post_associate)(struct iwl_priv *priv);
174 void (*config_ap)(struct iwl_priv *priv);
175 /* station management */
176 int (*update_bcast_stations)(struct iwl_priv *priv);
177 int (*manage_ibss_station)(struct iwl_priv *priv,
178 struct ieee80211_vif *vif, bool add);
179};
180
181struct iwl_ops {
182 const struct iwl_lib_ops *lib;
183 const struct iwl_hcmd_ops *hcmd;
184 const struct iwl_hcmd_utils_ops *utils;
185 const struct iwl_led_ops *led;
186 const struct iwl_nic_ops *nic;
187 const struct iwl_legacy_ops *legacy;
188 const struct ieee80211_ops *ieee80211_ops;
189};
190
191struct iwl_mod_params {
192 int sw_crypto; /* def: 0 = using hardware encryption */
193 int disable_hw_scan; /* def: 0 = use h/w scan */
194 int num_of_queues; /* def: HW dependent */
195 int disable_11n; /* def: 0 = 11n capabilities enabled */
196 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
197 int antenna; /* def: 0 = both antennas (use diversity) */
198 int restart_fw; /* def: 1 = restart firmware */
199};
200
201/*
202 * @led_compensation: compensate the LED on/off time per hardware, according
203 * to the deviation, to achieve the desired LED blink frequency.
204 * The detailed algorithm is described in iwl-led.c
205 * @chain_noise_num_beacons: number of beacons used to compute chain noise
206 * @wd_timeout: TX queues watchdog timeout
207 * @temperature_kelvin: temperature reported by uCode in Kelvin
208 * @ucode_tracing: support ucode continuous tracing
209 * @sensitivity_calib_by_driver: driver has the capability to perform
210 * sensitivity calibration operation
211 * @chain_noise_calib_by_driver: driver has the capability to perform
212 * chain noise calibration operation
213 */
214struct iwl_base_params {
215 int eeprom_size;
216 int num_of_queues; /* def: HW dependent */
217 int num_of_ampdu_queues;/* def: HW dependent */
218 /* for iwl_legacy_apm_init() */
219 u32 pll_cfg_val;
220 bool set_l0s;
221 bool use_bsm;
222
223 u16 led_compensation;
224 int chain_noise_num_beacons;
225 unsigned int wd_timeout;
226 bool temperature_kelvin;
227 const bool ucode_tracing;
228 const bool sensitivity_calib_by_driver;
229 const bool chain_noise_calib_by_driver;
230};
231
232/**
233 * struct iwl_cfg
234 * @fw_name_pre: Firmware filename prefix. The api version and extension
235 *	(.ucode) will be added to the filename before loading from disk. The
236 * filename is constructed as fw_name_pre<api>.ucode.
237 * @ucode_api_max: Highest version of uCode API supported by driver.
238 * @ucode_api_min: Lowest version of uCode API supported by driver.
239 * @scan_antennas: available antenna for scan operation
240 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
241 *
242 * We enable the driver to be backward compatible wrt API version. The
243 * driver specifies which APIs it supports (with @ucode_api_max being the
244 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
245 * it has a supported API version. The firmware's API version will be
246 * stored in @iwl_priv, enabling the driver to make runtime changes based
247 * on firmware version used.
248 *
249 * For example,
250 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
251 * Driver interacts with Firmware API version >= 2.
252 * } else {
253 * Driver interacts with Firmware API version 1.
254 * }
255 *
256 * The ideal usage of this infrastructure is to treat a new ucode API
257 * release as a new hardware revision. That is, through utilizing the
258 * iwl_hcmd_utils_ops etc. we accommodate different command structures
259 * and flows between hardware versions as well as their API
260 * versions.
261 *
262 */
263struct iwl_cfg {
264 /* params specific to an individual device within a device family */
265 const char *name;
266 const char *fw_name_pre;
267 const unsigned int ucode_api_max;
268 const unsigned int ucode_api_min;
269 u8 valid_tx_ant;
270 u8 valid_rx_ant;
271 unsigned int sku;
272 u16 eeprom_ver;
273 u16 eeprom_calib_ver;
274 const struct iwl_ops *ops;
275 /* module based parameters which can be set from modprobe cmd */
276 const struct iwl_mod_params *mod_params;
277 /* params not likely to change within a device family */
278 struct iwl_base_params *base_params;
279 /* params likely to change within a device family */
280 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
281 enum iwl_led_mode led_mode;
282};
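/*
 * Illustrative sketch only: a per-device entry is normally declared as a
 * designated-initializer table in the device-specific file (iwl-3945.c /
 * iwl-4965.c).  All field values below are placeholders, not the real
 * 3945/4965 tables.
 *
 *	struct iwl_cfg iwl_example_cfg = {
 *		.name = "Example legacy device",
 *		.fw_name_pre = "iwlwifi-example-",
 *		.ucode_api_max = 2,
 *		.ucode_api_min = 1,
 *		.ops = &iwl_example_ops,
 *		.mod_params = &iwl_example_mod_params,
 *		.base_params = &iwl_example_base_params,
 *		.led_mode = IWL_LED_BLINK,
 *	};
 */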
283
284/***************************
285 * L i b *
286 ***************************/
287
288struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
289int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
290 const struct ieee80211_tx_queue_params *params);
291int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
292void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
293 struct iwl_rxon_context *ctx,
294 int hw_decrypt);
295int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
296 struct iwl_rxon_context *ctx);
297int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
298 struct iwl_rxon_context *ctx);
299int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
300 struct ieee80211_channel *ch,
301 struct iwl_rxon_context *ctx);
302void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
303 struct iwl_rxon_context *ctx,
304 enum ieee80211_band band,
305 struct ieee80211_vif *vif);
306u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
307 enum ieee80211_band band);
308void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
309 struct iwl_ht_config *ht_conf);
310bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
311 struct iwl_rxon_context *ctx,
312 struct ieee80211_sta_ht_cap *ht_cap);
313void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
314 struct iwl_rxon_context *ctx);
315void iwl_legacy_set_rate(struct iwl_priv *priv);
316int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
317 struct ieee80211_hdr *hdr,
318 u32 decrypt_res,
319 struct ieee80211_rx_status *stats);
320void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
321int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
322 struct ieee80211_vif *vif);
323void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
324 struct ieee80211_vif *vif);
325int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
326 struct ieee80211_vif *vif,
327 enum nl80211_iftype newtype, bool newp2p);
328int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
329void iwl_legacy_txq_mem(struct iwl_priv *priv);
330
331#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
332int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
333void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
334void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
335void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
336 u16 length, struct ieee80211_hdr *header);
337void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
338 u16 length, struct ieee80211_hdr *header);
339const char *iwl_legacy_get_mgmt_string(int cmd);
340const char *iwl_legacy_get_ctrl_string(int cmd);
341void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
342void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
343 u16 len);
344#else
345static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
346{
347 return 0;
348}
349static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
350{
351}
352static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
353{
354}
355static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
356 u16 length, struct ieee80211_hdr *header)
357{
358}
359static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
360 u16 length, struct ieee80211_hdr *header)
361{
362}
363static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
364 __le16 fc, u16 len)
365{
366}
367#endif
368/*****************************************************
369 * RX handlers.
370 * **************************************************/
371void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
372 struct iwl_rx_mem_buffer *rxb);
373void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
374 struct iwl_rx_mem_buffer *rxb);
375void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
376 struct iwl_rx_mem_buffer *rxb);
377
378/*****************************************************
379* RX
380******************************************************/
381void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
382void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
383int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
384void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
385 struct iwl_rx_queue *q);
386int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
387void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
388 struct iwl_rx_mem_buffer *rxb);
389/* Handlers */
390void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
391 struct iwl_rx_mem_buffer *rxb);
392void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
393 struct iwl_rx_packet *pkt);
394void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
395void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
396
397/* TX helpers */
398
399/*****************************************************
400* TX
401******************************************************/
402void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
403 struct iwl_tx_queue *txq);
404int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
405 int slots_num, u32 txq_id);
406void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
407 struct iwl_tx_queue *txq,
408 int slots_num, u32 txq_id);
409void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
410void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
411void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
412/*****************************************************
413 * TX power
414 ****************************************************/
415int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
416
417/*******************************************************************************
418 * Rate
419 ******************************************************************************/
420
421u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
422 struct iwl_rxon_context *ctx);
423
424/*******************************************************************************
425 * Scanning
426 ******************************************************************************/
427void iwl_legacy_init_scan_params(struct iwl_priv *priv);
428int iwl_legacy_scan_cancel(struct iwl_priv *priv);
429int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
430void iwl_legacy_force_scan_end(struct iwl_priv *priv);
431int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
432 struct ieee80211_vif *vif,
433 struct cfg80211_scan_request *req);
434void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
435int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
436u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
437 struct ieee80211_mgmt *frame,
438 const u8 *ta, const u8 *ie, int ie_len, int left);
439void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
440u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
441 enum ieee80211_band band,
442 u8 n_probes);
443u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
444 enum ieee80211_band band,
445 struct ieee80211_vif *vif);
446void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
447void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
448
449/* For faster active scanning, scan will move to the next channel if fewer than
450 * PLCP_QUIET_THRESH packets are heard on this channel within
451 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
452 * time if it's a quiet channel (nothing responded to our probe, and there's
453 * no other traffic).
454 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
455#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
456#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
457
458#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
459
460/*****************************************************
461 * S e n d i n g H o s t C o m m a n d s *
462 *****************************************************/
463
464const char *iwl_legacy_get_cmd_string(u8 cmd);
465int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
466 struct iwl_host_cmd *cmd);
467int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
468int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
469 u16 len, const void *data);
470int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
471 const void *data,
472 void (*callback)(struct iwl_priv *priv,
473 struct iwl_device_cmd *cmd,
474 struct iwl_rx_packet *pkt));
475
476int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
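/*
 * Minimal usage sketch (assumes REPLY_BT_CONFIG, BT_COEX_DISABLE and
 * struct iwl_bt_cmd from iwl-commands.h): a fixed-size command is usually
 * sent synchronously with iwl_legacy_send_cmd_pdu(), which builds the
 * struct iwl_host_cmd on the caller's behalf.
 *
 *	struct iwl_bt_cmd bt_cmd = { .flags = BT_COEX_DISABLE };
 *
 *	ret = iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
 *				      sizeof(bt_cmd), &bt_cmd);
 */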
477
478
479/*****************************************************
480 * PCI *
481 *****************************************************/
482
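/*
 * Read the device's PCI Express Link Control register.  Callers (e.g. the
 * APM init path) typically mask the returned value against the ASPM L0S/L1
 * enable bits to decide how to program the GIO "chicken bits"; the raw
 * register value is returned unmodified, and a valid PCIe capability
 * offset is assumed.
 */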
483static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
484{
485 int pos;
486 u16 pci_lnk_ctl;
487 pos = pci_pcie_cap(priv->pci_dev);
488 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
489 return pci_lnk_ctl;
490}
491
492void iwl_legacy_bg_watchdog(unsigned long data);
493u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
494 u32 usec, u32 beacon_interval);
495__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
496 u32 addon, u32 beacon_interval);
497
498#ifdef CONFIG_PM
499int iwl_legacy_pci_suspend(struct device *device);
500int iwl_legacy_pci_resume(struct device *device);
501extern const struct dev_pm_ops iwl_legacy_pm_ops;
502
503#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
504
505#else /* !CONFIG_PM */
506
507#define IWL_LEGACY_PM_OPS NULL
508
509#endif /* !CONFIG_PM */
510
511/*****************************************************
512* Error Handling Debugging
513******************************************************/
514void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
515#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
516void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
517 struct iwl_rxon_context *ctx);
518#else
519static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
520 struct iwl_rxon_context *ctx)
521{
522}
523#endif
524
525void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
526
527/*****************************************************
528* GEOS
529******************************************************/
530int iwl_legacy_init_geos(struct iwl_priv *priv);
531void iwl_legacy_free_geos(struct iwl_priv *priv);
532
533/*************** DRIVER STATUS FUNCTIONS *****/
534
535#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
536/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
537#define STATUS_INT_ENABLED 2
538#define STATUS_RF_KILL_HW 3
539#define STATUS_CT_KILL 4
540#define STATUS_INIT 5
541#define STATUS_ALIVE 6
542#define STATUS_READY 7
543#define STATUS_TEMPERATURE 8
544#define STATUS_GEO_CONFIGURED 9
545#define STATUS_EXIT_PENDING 10
546#define STATUS_STATISTICS 12
547#define STATUS_SCANNING 13
548#define STATUS_SCAN_ABORTING 14
549#define STATUS_SCAN_HW 15
550#define STATUS_POWER_PMI 16
551#define STATUS_FW_ERROR 17
552#define STATUS_CHANNEL_SWITCH_PENDING 18
553
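/*
 * The STATUS_* values above are bit indices into the priv->status bitmap;
 * the inline helpers below query them with test_bit(), and the rest of the
 * driver flips them with set_bit()/clear_bit().
 */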
554static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
555{
556 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
557 * set but EXIT_PENDING is not */
558 return test_bit(STATUS_READY, &priv->status) &&
559 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
560 !test_bit(STATUS_EXIT_PENDING, &priv->status);
561}
562
563static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
564{
565 return test_bit(STATUS_ALIVE, &priv->status);
566}
567
568static inline int iwl_legacy_is_init(struct iwl_priv *priv)
569{
570 return test_bit(STATUS_INIT, &priv->status);
571}
572
573static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
574{
575 return test_bit(STATUS_RF_KILL_HW, &priv->status);
576}
577
578static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
579{
580 return iwl_legacy_is_rfkill_hw(priv);
581}
582
583static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
584{
585 return test_bit(STATUS_CT_KILL, &priv->status);
586}
587
588static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
589{
590
591 if (iwl_legacy_is_rfkill(priv))
592 return 0;
593
594 return iwl_legacy_is_ready(priv);
595}
596
597extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
598extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
599 u8 flags, bool clear);
600void iwl_legacy_apm_stop(struct iwl_priv *priv);
601int iwl_legacy_apm_init(struct iwl_priv *priv);
602
603int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
604 struct iwl_rxon_context *ctx);
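/*
 * Thin dispatch wrappers: RXON association updates and commits go through
 * the per-device hcmd ops so that 3945 and 4965 can supply their own
 * command layout and commit sequence.
 */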
605static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
606 struct iwl_rxon_context *ctx)
607{
608 return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
609}
610static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
611 struct iwl_rxon_context *ctx)
612{
613 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
614}
615static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
616 struct iwl_priv *priv, enum ieee80211_band band)
617{
618 return priv->hw->wiphy->bands[band];
619}
620
621/* mac80211 handlers */
622int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
623void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
624void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
625 struct ieee80211_vif *vif,
626 struct ieee80211_bss_conf *bss_conf,
627 u32 changes);
628void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
629 struct ieee80211_tx_info *info,
630 __le16 fc, __le32 *tx_flags);
631
632irqreturn_t iwl_legacy_isr(int irq, void *data);
633
634#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h
new file mode 100644
index 00000000000..668a9616c26
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-csr.h
@@ -0,0 +1,422 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_csr_h__
64#define __iwl_legacy_csr_h__
65/*
66 * CSR (control and status registers)
67 *
68 * CSR registers are mapped directly into PCI bus space, and are accessible
69 * whenever platform supplies power to device, even when device is in
70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 *
73 * Use iwl_write32() and iwl_read32() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use iwl_legacy_write_direct32() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers.
79 *
80 * NOTE: Device does need to be awake in order to read this memory
81 * via CSR_EEPROM register
82 */
83#define CSR_BASE (0x000)
84
85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
92#define CSR_GP_CNTRL (CSR_BASE+0x024)
93
94/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
96
97/*
98 * Hardware revision info
99 * Bit fields:
100 * 31-8: Reserved
101 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
103 * 1-0: "Dash" (-) value, as in A-1, etc.
104 *
105 * NOTE: Revision step affects calculation of CCK txpower for 4965.
106 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
107 */
108#define CSR_HW_REV (CSR_BASE+0x028)
109
110/*
111 * EEPROM memory reads
112 *
113 * NOTE: Device must be awake, initialized via apm_ops.init(),
114 * in order to read.
115 */
116#define CSR_EEPROM_REG (CSR_BASE+0x02c)
117#define CSR_EEPROM_GP (CSR_BASE+0x030)
118
119#define CSR_GIO_REG (CSR_BASE+0x03C)
120#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
121#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
122
123/*
124 * UCODE-DRIVER GP (general purpose) mailbox registers.
125 * SET/CLR registers set/clear bit(s) if "1" is written.
126 */
127#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
128#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
129#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
130#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
131
132#define CSR_LED_REG (CSR_BASE+0x094)
133#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
134
135/* GIO Chicken Bits (PCI Express bus link power management) */
136#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
137
138/* Analog phase-lock-loop configuration */
139#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
140
141/*
142 * CSR Hardware Revision Workaround Register. Indicates hardware rev;
143 * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
144 * See also CSR_HW_REV register.
145 * Bit fields:
146 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
147 * 1-0: "Dash" (-) value, as in C-1, etc.
148 */
149#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
150
151#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
152#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
153
154/* Bits for CSR_HW_IF_CONFIG_REG */
155#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
156#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
157#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
158#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
159
160#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
161#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
162#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
163#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
164#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
165#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
166
167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
172
173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
175
176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
177 * acknowledged (reset) by host writing "1" to flagged bits. */
178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
189
190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
191 CSR_INT_BIT_HW_ERR | \
192 CSR_INT_BIT_FH_TX | \
193 CSR_INT_BIT_SW_ERR | \
194 CSR_INT_BIT_RF_KILL | \
195 CSR_INT_BIT_SW_RX | \
196 CSR_INT_BIT_WAKEUP | \
197 CSR_INT_BIT_ALIVE)
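/*
 * Write-1-to-clear in practice, as a minimal sketch (assuming the
 * iwl_read32()/iwl_write32() helpers from iwl-io.h): the interrupt path
 * samples CSR_INT and then writes the same bits back to acknowledge them.
 *
 *	u32 inta = iwl_read32(priv, CSR_INT);
 *	iwl_write32(priv, CSR_INT, inta);	-- ack every bit just read
 */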
198
199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
208
209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
210 CSR39_FH_INT_BIT_RX_CHNL2 | \
211 CSR_FH_INT_BIT_RX_CHNL1 | \
212 CSR_FH_INT_BIT_RX_CHNL0)
213
214
215#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
216 CSR_FH_INT_BIT_TX_CHNL1 | \
217 CSR_FH_INT_BIT_TX_CHNL0)
218
219#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
220 CSR_FH_INT_BIT_RX_CHNL1 | \
221 CSR_FH_INT_BIT_RX_CHNL0)
222
223#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
224 CSR_FH_INT_BIT_TX_CHNL0)
225
226/* GPIO */
227#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
228#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
229#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
230
231/* RESET */
232#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
233#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
234#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
235#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
236#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
237#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
238
239/*
240 * GP (general purpose) CONTROL REGISTER
241 * Bit fields:
242 * 27: HW_RF_KILL_SW
243 * Indicates state of (platform's) hardware RF-Kill switch
244 * 26-24: POWER_SAVE_TYPE
245 * Indicates current power-saving mode:
246 * 000 -- No power saving
247 * 001 -- MAC power-down
248 * 010 -- PHY (radio) power-down
249 * 011 -- Error
250 * 9-6: SYS_CONFIG
251 * Indicates current system configuration, reflecting pins on chip
252 * as forced high/low by device circuit board.
253 * 4: GOING_TO_SLEEP
254 * Indicates MAC is entering a power-saving sleep power-down.
255 * Not a good time to access device-internal resources.
256 * 3: MAC_ACCESS_REQ
257 * Host sets this to request and maintain MAC wakeup, to allow host
258 * access to device-internal resources. Host must wait for
259 * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
260 * device registers.
261 * 2: INIT_DONE
262 * Host sets this to put device into fully operational D0 power mode.
263 * Host resets this after SW_RESET to put device into low power mode.
264 * 0: MAC_CLOCK_READY
265 * Indicates MAC (ucode processor, etc.) is powered up and can run.
266 * Internal resources are accessible.
267 * NOTE: This does not indicate that the processor is actually running.
268 * NOTE: This does not indicate that 4965 or 3945 has completed
269 * init or post-power-down restore of internal SRAM memory.
270 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
271 * SRAM is restored and uCode is in normal operation mode.
272 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
273 * do not need to save/restore it.
274 * NOTE: After device reset, this bit remains "0" until host sets
275 * INIT_DONE
276 */
277#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
278#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
279#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
280#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
281
282#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
283
284#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
285#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
286#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
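/*
 * A minimal sketch of the wake handshake described above (the driver's
 * nic-access helpers in iwl-io.h wrap this sequence; only iwl_read32()/
 * iwl_write32() and the CSR bits defined here are assumed):
 *
 *	iwl_write32(priv, CSR_GP_CNTRL,
 *		    iwl_read32(priv, CSR_GP_CNTRL) |
 *		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 *	-- poll CSR_GP_CNTRL until MAC_CLOCK_READY is set and
 *	--   GOING_TO_SLEEP is clear, then access HBUS/peripheral registers
 *	-- finally clear MAC_ACCESS_REQ to let the MAC sleep again
 */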
287
288
289/* EEPROM REG */
290#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
291#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
292#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
293#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
294
295/* EEPROM GP */
296#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
297#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
298#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
299#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
300
301/* GP REG */
302#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
303#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
304#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
305#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
306#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
307
308
309/* CSR GIO */
310#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
311
312/*
313 * UCODE-DRIVER GP (general purpose) mailbox register 1
314 * Host driver and uCode write and/or read this register to communicate with
315 * each other.
316 * Bit fields:
317 * 4: UCODE_DISABLE
318 * Host sets this to request permanent halt of uCode, same as
319 * sending CARD_STATE command with "halt" bit set.
320 * 3: CT_KILL_EXIT
321 * Host sets this to request exit from CT_KILL state, i.e. host thinks
322 * device temperature is low enough to continue normal operation.
323 * 2: CMD_BLOCKED
324 * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
325 * to release uCode to clear all Tx and command queues, enter
326 * unassociated mode, and power down.
327 * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
328 * 1: SW_BIT_RFKILL
329 * Host sets this when issuing CARD_STATE command to request
330 * device sleep.
331 * 0: MAC_SLEEP
332 * uCode sets this when preparing a power-saving power-down.
333 * uCode resets this when power-up is complete and SRAM is sane.
334 * NOTE: 3945/4965 saves internal SRAM data to host when powering down,
335 * and must restore this data after powering back up.
336 * MAC_SLEEP is the best indication that restore is complete.
337 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
338 * do not need to save/restore it.
339 */
340#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
341#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
342#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
343#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
344
345/* GIO Chicken Bits (PCI Express bus link power management) */
346#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
347#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
348
349/* LED */
350#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
351#define CSR_LED_REG_TRUN_ON (0x78)
352#define CSR_LED_REG_TRUN_OFF (0x38)
353
354/* ANA_PLL */
355#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
356
357/* HPET MEM debug */
358#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
359
360/* DRAM INT TABLE */
361#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
362#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
363
364/*
365 * HBUS (Host-side Bus)
366 *
367 * HBUS registers are mapped directly into PCI bus space, but are used
368 * to indirectly access device's internal memory or registers that
369 * may be powered-down.
370 *
371 * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
372 * for these registers;
373 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
374 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
375 * internal resources.
376 *
377 * Do not use iwl_write32()/iwl_read32() family to access these registers;
378 * these provide only simple PCI bus access, without waking up the MAC.
379 */
380#define HBUS_BASE (0x400)
381
382/*
383 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
384 * structures, error log, event log, verifying uCode load).
385 * First write to address register, then read from or write to data register
386 * to complete the job. Once the address register is set up, accesses to
387 * data registers auto-increment the address by one dword.
388 * Bit usage for address registers (read or write):
389 * 0-31: memory address within device
390 */
391#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
392#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
393#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
394#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
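/*
 * Indirect SRAM read, as a sketch (assumes MAC access has already been
 * granted as described for CSR_GP_CNTRL, and uses the direct-access
 * helpers from iwl-io.h):
 *
 *	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, sram_addr);
 *	val = iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
 *	-- further reads of HBUS_TARG_MEM_RDAT auto-increment the address
 */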
395
396/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
397#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
398#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
399
400/*
401 * Registers for accessing device's internal peripheral registers
402 * (e.g. SCD, BSM, etc.). First write to address register,
403 * then read from or write to data register to complete the job.
404 * Bit usage for address registers (read or write):
405 * 0-15: register address (offset) within device
406 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
407 */
408#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
409#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
410#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
411#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
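/*
 * Peripheral (PRPH) register write, as a sketch using the byte-count field
 * described above (3 == a full dword, addr is the 16-bit register offset;
 * the prph helpers in iwl-io.h wrap this):
 *
 *	iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
 *				  ((addr & 0x0000FFFF) | (3 << 24)));
 *	iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
 */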
412
413/*
414 * Per-Tx-queue write pointer (index, really!)
415 * Indicates index to next TFD that driver will fill (1 past latest filled).
416 * Bit usage:
417 * 0-7: queue write index
418 * 11-8: queue selector
419 */
420#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
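/*
 * Composing the write-pointer value, as a minimal sketch (assuming the
 * iwl_write32() helper and the tx-queue bookkeeping used by
 * iwl_legacy_txq_update_write_ptr() once the NIC is awake):
 *
 *	iwl_write32(priv, HBUS_TARG_WRPTR,
 *		    txq->q.write_ptr | (txq_id << 8));
 */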
421
422#endif /* !__iwl_legacy_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
new file mode 100644
index 00000000000..ae13112701b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debug.h
@@ -0,0 +1,198 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_debug_h__
30#define __iwl_legacy_debug_h__
31
32struct iwl_priv;
33extern u32 iwlegacy_debug_level;
34
35#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
36#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
39
40#define iwl_print_hex_error(priv, p, len) \
41do { \
42 print_hex_dump(KERN_ERR, "iwl data: ", \
43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
44} while (0)
45
46#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
47#define IWL_DEBUG(__priv, level, fmt, args...) \
48do { \
49 if (iwl_legacy_get_debug_level(__priv) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \
53} while (0)
54
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
56do { \
57 if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \
61} while (0)
62
63#define iwl_print_hex_dump(priv, level, p, len) \
64do { \
65 if (iwl_legacy_get_debug_level(priv) & level) \
66 print_hex_dump(KERN_DEBUG, "iwl data: ", \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0)
69
70#else
71#define IWL_DEBUG(__priv, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
73static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
74 const void *p, u32 len)
75{}
76#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
77
78#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
79int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
81#else
82static inline int
83iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
84{
85 return 0;
86}
87static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
88{
89}
90#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
91
92/*
93 * To use the debug system:
94 *
95 * If you are defining a new debug classification, simply add it to the #define
96 * list here in the form of
97 *
98 * #define IWL_DL_xxxx VALUE
99 *
100 * where xxxx should be the name of the classification (for example, WEP).
101 *
102 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
103 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
104 * to send output to that classification.
105 *
106 * The active debug levels can be accessed via files
107 *
 108 * /sys/module/iwl4965/parameters/debug
109 * /sys/module/iwl3945/parameters/debug
110 * /sys/class/net/wlan0/device/debug_level
111 *
112 * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
113 */
114
115/* 0x0000000F - 0x00000001 */
116#define IWL_DL_INFO (1 << 0)
117#define IWL_DL_MAC80211 (1 << 1)
118#define IWL_DL_HCMD (1 << 2)
119#define IWL_DL_STATE (1 << 3)
120/* 0x000000F0 - 0x00000010 */
121#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_HCMD_DUMP (1 << 5)
123#define IWL_DL_EEPROM (1 << 6)
124#define IWL_DL_RADIO (1 << 7)
125/* 0x00000F00 - 0x00000100 */
126#define IWL_DL_POWER (1 << 8)
127#define IWL_DL_TEMP (1 << 9)
128#define IWL_DL_NOTIF (1 << 10)
129#define IWL_DL_SCAN (1 << 11)
130/* 0x0000F000 - 0x00001000 */
131#define IWL_DL_ASSOC (1 << 12)
132#define IWL_DL_DROP (1 << 13)
133#define IWL_DL_TXPOWER (1 << 14)
134#define IWL_DL_AP (1 << 15)
135/* 0x000F0000 - 0x00010000 */
136#define IWL_DL_FW (1 << 16)
137#define IWL_DL_RF_KILL (1 << 17)
138#define IWL_DL_FW_ERRORS (1 << 18)
139#define IWL_DL_LED (1 << 19)
140/* 0x00F00000 - 0x00100000 */
141#define IWL_DL_RATE (1 << 20)
142#define IWL_DL_CALIB (1 << 21)
143#define IWL_DL_WEP (1 << 22)
144#define IWL_DL_TX (1 << 23)
145/* 0x0F000000 - 0x01000000 */
146#define IWL_DL_RX (1 << 24)
147#define IWL_DL_ISR (1 << 25)
148#define IWL_DL_HT (1 << 26)
149#define IWL_DL_IO (1 << 27)
150/* 0xF0000000 - 0x10000000 */
151#define IWL_DL_11H (1 << 28)
152#define IWL_DL_STATS (1 << 29)
153#define IWL_DL_TX_REPLY (1 << 30)
154#define IWL_DL_QOS (1 << 31)
155
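/*
 * Example: IWL_DL_SCAN is bit 11 (0x800) and IWL_DL_ASSOC is bit 12
 * (0x1000), so tracing only scan and association flow means writing the
 * combined mask 0x1800 to the debug_level file listed above:
 *
 *	echo 0x1800 > /sys/class/net/wlan0/device/debug_level
 */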
156#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
157#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
158#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
159#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
160#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
161#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
162#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
163#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
164#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
165#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
166#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
168#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
169#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
170#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
171#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
172#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
173#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
174 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
175#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
176#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
177#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
178#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
179#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
180 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
181#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
182#define IWL_DEBUG_ASSOC(p, f, a...) \
183 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
184#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
185 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
186#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
187#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
188#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
189 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
190#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
191#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
192 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
193#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
194#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
195#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
196#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
197
198#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
new file mode 100644
index 00000000000..996996a7165
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
@@ -0,0 +1,1313 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <net/mac80211.h>
30
31
32#include "iwl-dev.h"
33#include "iwl-debug.h"
34#include "iwl-core.h"
35#include "iwl-io.h"
36
37/* create and remove of files */
38#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
39 if (!debugfs_create_file(#name, mode, parent, priv, \
40 &iwl_legacy_dbgfs_##name##_ops)) \
41 goto err; \
42} while (0)
43
44#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
45 struct dentry *__tmp; \
46 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
47 parent, ptr); \
48 if (IS_ERR(__tmp) || !__tmp) \
49 goto err; \
50} while (0)
51
52#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
53 struct dentry *__tmp; \
54 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
55 parent, ptr); \
56 if (IS_ERR(__tmp) || !__tmp) \
57 goto err; \
58} while (0)
59
60/* file operation */
61#define DEBUGFS_READ_FUNC(name) \
62static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
63 char __user *user_buf, \
64 size_t count, loff_t *ppos);
65
66#define DEBUGFS_WRITE_FUNC(name) \
67static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
68 const char __user *user_buf, \
69 size_t count, loff_t *ppos);
70
71
72static int
73iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
74{
75 file->private_data = inode->i_private;
76 return 0;
77}
78
79#define DEBUGFS_READ_FILE_OPS(name) \
80 DEBUGFS_READ_FUNC(name); \
81static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
82 .read = iwl_legacy_dbgfs_##name##_read, \
83 .open = iwl_legacy_dbgfs_open_file_generic, \
84 .llseek = generic_file_llseek, \
85};
86
87#define DEBUGFS_WRITE_FILE_OPS(name) \
88 DEBUGFS_WRITE_FUNC(name); \
89static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
90 .write = iwl_legacy_dbgfs_##name##_write, \
91 .open = iwl_legacy_dbgfs_open_file_generic, \
92 .llseek = generic_file_llseek, \
93};
94
95#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
96 DEBUGFS_READ_FUNC(name); \
97 DEBUGFS_WRITE_FUNC(name); \
98static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
99 .write = iwl_legacy_dbgfs_##name##_write, \
100 .read = iwl_legacy_dbgfs_##name##_read, \
101 .open = iwl_legacy_dbgfs_open_file_generic, \
102 .llseek = generic_file_llseek, \
103};
104
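/*
 * Typical wiring for a new entry, as a sketch ("foo" and the parent dentry
 * name are placeholders): declare the fops with one of the *_FILE_OPS
 * macros, implement the matching read/write callbacks, then register the
 * file where the per-device debugfs directories are created.
 *
 *	DEBUGFS_READ_FILE_OPS(foo);
 *	...
 *	DEBUGFS_ADD_FILE(foo, dir_data, S_IRUSR);
 */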
105static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
106 char __user *user_buf,
107 size_t count, loff_t *ppos) {
108
109 struct iwl_priv *priv = file->private_data;
110 char *buf;
111 int pos = 0;
112
113 int cnt;
114 ssize_t ret;
115 const size_t bufsz = 100 +
116 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
117 buf = kzalloc(bufsz, GFP_KERNEL);
118 if (!buf)
119 return -ENOMEM;
120 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
121 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
122 pos += scnprintf(buf + pos, bufsz - pos,
123 "\t%25s\t\t: %u\n",
124 iwl_legacy_get_mgmt_string(cnt),
125 priv->tx_stats.mgmt[cnt]);
126 }
127 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
128 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
129 pos += scnprintf(buf + pos, bufsz - pos,
130 "\t%25s\t\t: %u\n",
131 iwl_legacy_get_ctrl_string(cnt),
132 priv->tx_stats.ctrl[cnt]);
133 }
134 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
135 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
136 priv->tx_stats.data_cnt);
137 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
138 priv->tx_stats.data_bytes);
139 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
140 kfree(buf);
141 return ret;
142}
143
144static ssize_t
145iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
146 const char __user *user_buf,
147 size_t count, loff_t *ppos)
148{
149 struct iwl_priv *priv = file->private_data;
150 u32 clear_flag;
151 char buf[8];
152 int buf_size;
153
154 memset(buf, 0, sizeof(buf));
155 buf_size = min(count, sizeof(buf) - 1);
156 if (copy_from_user(buf, user_buf, buf_size))
157 return -EFAULT;
158 if (sscanf(buf, "%x", &clear_flag) != 1)
159 return -EFAULT;
160 iwl_legacy_clear_traffic_stats(priv);
161
162 return count;
163}
164
165static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
166 char __user *user_buf,
167 size_t count, loff_t *ppos) {
168
169 struct iwl_priv *priv = file->private_data;
170 char *buf;
171 int pos = 0;
172 int cnt;
173 ssize_t ret;
174 const size_t bufsz = 100 +
175 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
176 buf = kzalloc(bufsz, GFP_KERNEL);
177 if (!buf)
178 return -ENOMEM;
179
180 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
181 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
182 pos += scnprintf(buf + pos, bufsz - pos,
183 "\t%25s\t\t: %u\n",
184 iwl_legacy_get_mgmt_string(cnt),
185 priv->rx_stats.mgmt[cnt]);
186 }
187 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
188 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
189 pos += scnprintf(buf + pos, bufsz - pos,
190 "\t%25s\t\t: %u\n",
191 iwl_legacy_get_ctrl_string(cnt),
192 priv->rx_stats.ctrl[cnt]);
193 }
194 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
195 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
196 priv->rx_stats.data_cnt);
197 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
198 priv->rx_stats.data_bytes);
199
200 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
201 kfree(buf);
202 return ret;
203}
204
 205#define BYTE1_MASK 0x000000ff
 206#define BYTE2_MASK 0x0000ffff
 207#define BYTE3_MASK 0x00ffffff
208static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
209 char __user *user_buf,
210 size_t count, loff_t *ppos)
211{
212 u32 val;
213 char *buf;
214 ssize_t ret;
215 int i;
216 int pos = 0;
217 struct iwl_priv *priv = file->private_data;
218 size_t bufsz;
219
220 /* default is to dump the entire data segment */
221 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
222 priv->dbgfs_sram_offset = 0x800000;
223 if (priv->ucode_type == UCODE_INIT)
224 priv->dbgfs_sram_len = priv->ucode_init_data.len;
225 else
226 priv->dbgfs_sram_len = priv->ucode_data.len;
227 }
228 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
229 buf = kmalloc(bufsz, GFP_KERNEL);
230 if (!buf)
231 return -ENOMEM;
232 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
233 priv->dbgfs_sram_len);
234 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
235 priv->dbgfs_sram_offset);
236 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
237 val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
238 priv->dbgfs_sram_len - i);
239 if (i < 4) {
240 switch (i) {
241 case 1:
242 val &= BYTE1_MASK;
243 break;
244 case 2:
245 val &= BYTE2_MASK;
246 break;
247 case 3:
248 val &= BYTE3_MASK;
249 break;
250 }
251 }
252 if (!(i % 16))
253 pos += scnprintf(buf + pos, bufsz - pos, "\n");
254 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
255 }
256 pos += scnprintf(buf + pos, bufsz - pos, "\n");
257
258 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
259 kfree(buf);
260 return ret;
261}
262
263static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
264 const char __user *user_buf,
265 size_t count, loff_t *ppos)
266{
267 struct iwl_priv *priv = file->private_data;
268 char buf[64];
269 int buf_size;
270 u32 offset, len;
271
272 memset(buf, 0, sizeof(buf));
273 buf_size = min(count, sizeof(buf) - 1);
274 if (copy_from_user(buf, user_buf, buf_size))
275 return -EFAULT;
276
277 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
278 priv->dbgfs_sram_offset = offset;
279 priv->dbgfs_sram_len = len;
280 } else {
281 priv->dbgfs_sram_offset = 0;
282 priv->dbgfs_sram_len = 0;
283 }
284
285 return count;
286}
287
288static ssize_t
289iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
290 size_t count, loff_t *ppos)
291{
292 struct iwl_priv *priv = file->private_data;
293 struct iwl_station_entry *station;
294 int max_sta = priv->hw_params.max_stations;
295 char *buf;
296 int i, j, pos = 0;
297 ssize_t ret;
298 /* Add 30 for initial string */
299 const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
300
301 buf = kmalloc(bufsz, GFP_KERNEL);
302 if (!buf)
303 return -ENOMEM;
304
305 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
306 priv->num_stations);
307
308 for (i = 0; i < max_sta; i++) {
309 station = &priv->stations[i];
310 if (!station->used)
311 continue;
312 pos += scnprintf(buf + pos, bufsz - pos,
313 "station %d - addr: %pM, flags: %#x\n",
314 i, station->sta.sta.addr,
315 station->sta.station_flags_msk);
316 pos += scnprintf(buf + pos, bufsz - pos,
317 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
318 pos += scnprintf(buf + pos, bufsz - pos,
319 "start_idx\tbitmap\t\t\trate_n_flags\n");
320
321 for (j = 0; j < MAX_TID_COUNT; j++) {
322 pos += scnprintf(buf + pos, bufsz - pos,
323 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
324 j, station->tid[j].seq_number,
325 station->tid[j].agg.txq_id,
326 station->tid[j].agg.frame_count,
327 station->tid[j].tfds_in_queue,
328 station->tid[j].agg.start_idx,
329 station->tid[j].agg.bitmap,
330 station->tid[j].agg.rate_n_flags);
331
332 if (station->tid[j].agg.wait_for_ba)
333 pos += scnprintf(buf + pos, bufsz - pos,
334 " - waitforba");
335 pos += scnprintf(buf + pos, bufsz - pos, "\n");
336 }
337
338 pos += scnprintf(buf + pos, bufsz - pos, "\n");
339 }
340
341 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
342 kfree(buf);
343 return ret;
344}
345
346static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
347 char __user *user_buf,
348 size_t count,
349 loff_t *ppos)
350{
351 ssize_t ret;
352 struct iwl_priv *priv = file->private_data;
353 int pos = 0, ofs = 0, buf_size = 0;
354 const u8 *ptr;
355 char *buf;
356 u16 eeprom_ver;
357 size_t eeprom_len = priv->cfg->base_params->eeprom_size;
358 buf_size = 4 * eeprom_len + 256;
359
360 if (eeprom_len % 16) {
 361		IWL_ERR(priv, "NVM size is not a multiple of 16.\n");
362 return -ENODATA;
363 }
364
365 ptr = priv->eeprom;
366 if (!ptr) {
367 IWL_ERR(priv, "Invalid EEPROM memory\n");
368 return -ENOMEM;
369 }
370
371 /* 4 characters for byte 0xYY */
372 buf = kzalloc(buf_size, GFP_KERNEL);
373 if (!buf) {
 374		IWL_ERR(priv, "Cannot allocate buffer\n");
375 return -ENOMEM;
376 }
377 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
378 pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
379 "version: 0x%x\n", eeprom_ver);
380 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
381 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
 382		hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
383 buf_size - pos, 0);
384 pos += strlen(buf + pos);
385 if (buf_size - pos > 0)
386 buf[pos++] = '\n';
387 }
388
389 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
390 kfree(buf);
391 return ret;
392}
393
394static ssize_t
395iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
396 size_t count, loff_t *ppos)
397{
398 struct iwl_priv *priv = file->private_data;
399 struct ieee80211_channel *channels = NULL;
400 const struct ieee80211_supported_band *supp_band = NULL;
401 int pos = 0, i, bufsz = PAGE_SIZE;
402 char *buf;
403 ssize_t ret;
404
405 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
406 return -EAGAIN;
407
408 buf = kzalloc(bufsz, GFP_KERNEL);
409 if (!buf) {
 410		IWL_ERR(priv, "Cannot allocate buffer\n");
411 return -ENOMEM;
412 }
413
414 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
415 if (supp_band) {
416 channels = supp_band->channels;
417
418 pos += scnprintf(buf + pos, bufsz - pos,
 419			"Displaying %d channels in 2.4GHz band (802.11bg):\n",
420 supp_band->n_channels);
421
422 for (i = 0; i < supp_band->n_channels; i++)
423 pos += scnprintf(buf + pos, bufsz - pos,
424 "%d: %ddBm: BSS%s%s, %s.\n",
425 channels[i].hw_value,
426 channels[i].max_power,
427 channels[i].flags & IEEE80211_CHAN_RADAR ?
428 " (IEEE 802.11h required)" : "",
429 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
430 || (channels[i].flags &
431 IEEE80211_CHAN_RADAR)) ? "" :
432 ", IBSS",
433 channels[i].flags &
434 IEEE80211_CHAN_PASSIVE_SCAN ?
435 "passive only" : "active/passive");
436 }
437 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
438 if (supp_band) {
439 channels = supp_band->channels;
440
441 pos += scnprintf(buf + pos, bufsz - pos,
442 "Displaying %d channels in 5.2GHz band (802.11a)\n",
443 supp_band->n_channels);
444
445 for (i = 0; i < supp_band->n_channels; i++)
446 pos += scnprintf(buf + pos, bufsz - pos,
447 "%d: %ddBm: BSS%s%s, %s.\n",
448 channels[i].hw_value,
449 channels[i].max_power,
450 channels[i].flags & IEEE80211_CHAN_RADAR ?
451 " (IEEE 802.11h required)" : "",
452 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
453 || (channels[i].flags &
454 IEEE80211_CHAN_RADAR)) ? "" :
455 ", IBSS",
456 channels[i].flags &
457 IEEE80211_CHAN_PASSIVE_SCAN ?
458 "passive only" : "active/passive");
459 }
460 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
461 kfree(buf);
462 return ret;
463}
464
465static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
466 char __user *user_buf,
467 size_t count, loff_t *ppos) {
468
469 struct iwl_priv *priv = file->private_data;
470 char buf[512];
471 int pos = 0;
472 const size_t bufsz = sizeof(buf);
473
474 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
475 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
476 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
477 test_bit(STATUS_INT_ENABLED, &priv->status));
478 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
479 test_bit(STATUS_RF_KILL_HW, &priv->status));
480 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
481 test_bit(STATUS_CT_KILL, &priv->status));
482 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
483 test_bit(STATUS_INIT, &priv->status));
484 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
485 test_bit(STATUS_ALIVE, &priv->status));
486 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
487 test_bit(STATUS_READY, &priv->status));
488 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
489 test_bit(STATUS_TEMPERATURE, &priv->status));
490 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
491 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
492 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
493 test_bit(STATUS_EXIT_PENDING, &priv->status));
494 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
495 test_bit(STATUS_STATISTICS, &priv->status));
496 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
497 test_bit(STATUS_SCANNING, &priv->status));
498 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
499 test_bit(STATUS_SCAN_ABORTING, &priv->status));
500 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
501 test_bit(STATUS_SCAN_HW, &priv->status));
502 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
503 test_bit(STATUS_POWER_PMI, &priv->status));
504 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
505 test_bit(STATUS_FW_ERROR, &priv->status));
506 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
507}
508
509static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
510 char __user *user_buf,
511 size_t count, loff_t *ppos) {
512
513 struct iwl_priv *priv = file->private_data;
514 int pos = 0;
515 int cnt = 0;
516 char *buf;
517 int bufsz = 24 * 64; /* 24 items * 64 char per item */
518 ssize_t ret;
519
520 buf = kzalloc(bufsz, GFP_KERNEL);
521 if (!buf) {
522 IWL_ERR(priv, "Can not allocate Buffer\n");
523 return -ENOMEM;
524 }
525
526 pos += scnprintf(buf + pos, bufsz - pos,
527 "Interrupt Statistics Report:\n");
528
529 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
530 priv->isr_stats.hw);
531 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
532 priv->isr_stats.sw);
533 if (priv->isr_stats.sw || priv->isr_stats.hw) {
534 pos += scnprintf(buf + pos, bufsz - pos,
535 "\tLast Restarting Code: 0x%X\n",
536 priv->isr_stats.err_code);
537 }
538#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
539 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
540 priv->isr_stats.sch);
541 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
542 priv->isr_stats.alive);
543#endif
544 pos += scnprintf(buf + pos, bufsz - pos,
545 "HW RF KILL switch toggled:\t %u\n",
546 priv->isr_stats.rfkill);
547
548 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
549 priv->isr_stats.ctkill);
550
551 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
552 priv->isr_stats.wakeup);
553
554 pos += scnprintf(buf + pos, bufsz - pos,
555 "Rx command responses:\t\t %u\n",
556 priv->isr_stats.rx);
557 for (cnt = 0; cnt < REPLY_MAX; cnt++) {
558 if (priv->isr_stats.rx_handlers[cnt] > 0)
559 pos += scnprintf(buf + pos, bufsz - pos,
560 "\tRx handler[%36s]:\t\t %u\n",
561 iwl_legacy_get_cmd_string(cnt),
562 priv->isr_stats.rx_handlers[cnt]);
563 }
564
565 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
566 priv->isr_stats.tx);
567
568 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
569 priv->isr_stats.unhandled);
570
571 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
572 kfree(buf);
573 return ret;
574}
575
576static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
577 const char __user *user_buf,
578 size_t count, loff_t *ppos)
579{
580 struct iwl_priv *priv = file->private_data;
581 char buf[8];
582 int buf_size;
583 u32 reset_flag;
584
585 memset(buf, 0, sizeof(buf));
586 buf_size = min(count, sizeof(buf) - 1);
587 if (copy_from_user(buf, user_buf, buf_size))
588 return -EFAULT;
589 if (sscanf(buf, "%x", &reset_flag) != 1)
590 return -EFAULT;
591 if (reset_flag == 0)
592 iwl_legacy_clear_isr_stats(priv);
593
594 return count;
595}
596
597static ssize_t
598iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
599 size_t count, loff_t *ppos)
600{
601 struct iwl_priv *priv = file->private_data;
602 struct iwl_rxon_context *ctx;
603 int pos = 0, i;
604 char buf[256 * NUM_IWL_RXON_CTX];
605 const size_t bufsz = sizeof(buf);
606
607 for_each_context(priv, ctx) {
608 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
609 ctx->ctxid);
610 for (i = 0; i < AC_NUM; i++) {
611 pos += scnprintf(buf + pos, bufsz - pos,
612 "\tcw_min\tcw_max\taifsn\ttxop\n");
613 pos += scnprintf(buf + pos, bufsz - pos,
614 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
615 ctx->qos_data.def_qos_parm.ac[i].cw_min,
616 ctx->qos_data.def_qos_parm.ac[i].cw_max,
617 ctx->qos_data.def_qos_parm.ac[i].aifsn,
618 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
619 }
620 pos += scnprintf(buf + pos, bufsz - pos, "\n");
621 }
622 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
623}
624
625static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
626 const char __user *user_buf,
627 size_t count, loff_t *ppos)
628{
629 struct iwl_priv *priv = file->private_data;
630 char buf[8];
631 int buf_size;
632 int ht40;
633
634 memset(buf, 0, sizeof(buf));
635 buf_size = min(count, sizeof(buf) - 1);
636 if (copy_from_user(buf, user_buf, buf_size))
637 return -EFAULT;
638 if (sscanf(buf, "%d", &ht40) != 1)
639 return -EFAULT;
640 if (!iwl_legacy_is_any_associated(priv))
641 priv->disable_ht40 = ht40 ? true : false;
642 else {
643 IWL_ERR(priv, "Sta associated with AP - "
644 "Change to 40MHz channel support is not allowed\n");
645 return -EINVAL;
646 }
647
648 return count;
649}
650
651static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
652 char __user *user_buf,
653 size_t count, loff_t *ppos)
654{
655 struct iwl_priv *priv = file->private_data;
656 char buf[100];
657 int pos = 0;
658 const size_t bufsz = sizeof(buf);
659
660 pos += scnprintf(buf + pos, bufsz - pos,
661 "11n 40MHz Mode: %s\n",
662 priv->disable_ht40 ? "Disabled" : "Enabled");
663 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
664}
665
666DEBUGFS_READ_WRITE_FILE_OPS(sram);
667DEBUGFS_READ_FILE_OPS(nvm);
668DEBUGFS_READ_FILE_OPS(stations);
669DEBUGFS_READ_FILE_OPS(channels);
670DEBUGFS_READ_FILE_OPS(status);
671DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
672DEBUGFS_READ_FILE_OPS(qos);
673DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
674
675static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
676 char __user *user_buf,
677 size_t count, loff_t *ppos)
678{
679 struct iwl_priv *priv = file->private_data;
680 int pos = 0, ofs = 0;
681 int cnt = 0, entry;
682 struct iwl_tx_queue *txq;
683 struct iwl_queue *q;
684 struct iwl_rx_queue *rxq = &priv->rxq;
685 char *buf;
686 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
687 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
688 const u8 *ptr;
689 ssize_t ret;
690
691 if (!priv->txq) {
692 IWL_ERR(priv, "txq not ready\n");
693 return -EAGAIN;
694 }
695 buf = kzalloc(bufsz, GFP_KERNEL);
696 if (!buf) {
697 IWL_ERR(priv, "Can not allocate buffer\n");
698 return -ENOMEM;
699 }
700 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
701 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
702 txq = &priv->txq[cnt];
703 q = &txq->q;
704 pos += scnprintf(buf + pos, bufsz - pos,
705 "q[%d]: read_ptr: %u, write_ptr: %u\n",
706 cnt, q->read_ptr, q->write_ptr);
707 }
708 if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
709 ptr = priv->tx_traffic;
710 pos += scnprintf(buf + pos, bufsz - pos,
711 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
712 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
713 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
714 entry++, ofs += 16) {
715 pos += scnprintf(buf + pos, bufsz - pos,
716 "0x%.4x ", ofs);
717 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
718 buf + pos, bufsz - pos, 0);
719 pos += strlen(buf + pos);
720 if (bufsz - pos > 0)
721 buf[pos++] = '\n';
722 }
723 }
724 }
725
726 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
727 pos += scnprintf(buf + pos, bufsz - pos,
728 "read: %u, write: %u\n",
729 rxq->read, rxq->write);
730
731 if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
732 ptr = priv->rx_traffic;
733 pos += scnprintf(buf + pos, bufsz - pos,
734 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
735 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
736 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
737 entry++, ofs += 16) {
738 pos += scnprintf(buf + pos, bufsz - pos,
739 "0x%.4x ", ofs);
740 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
741 buf + pos, bufsz - pos, 0);
742 pos += strlen(buf + pos);
743 if (bufsz - pos > 0)
744 buf[pos++] = '\n';
745 }
746 }
747 }
748
749 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
750 kfree(buf);
751 return ret;
752}
753
754static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
755 const char __user *user_buf,
756 size_t count, loff_t *ppos)
757{
758 struct iwl_priv *priv = file->private_data;
759 char buf[8];
760 int buf_size;
761 int traffic_log;
762
763 memset(buf, 0, sizeof(buf));
764 buf_size = min(count, sizeof(buf) - 1);
765 if (copy_from_user(buf, user_buf, buf_size))
766 return -EFAULT;
767 if (sscanf(buf, "%d", &traffic_log) != 1)
768 return -EFAULT;
769 if (traffic_log == 0)
770 iwl_legacy_reset_traffic_log(priv);
771
772 return count;
773}
774
775static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
776 char __user *user_buf,
777 size_t count, loff_t *ppos) {
778
779 struct iwl_priv *priv = file->private_data;
780 struct iwl_tx_queue *txq;
781 struct iwl_queue *q;
782 char *buf;
783 int pos = 0;
784 int cnt;
785 int ret;
786 const size_t bufsz = sizeof(char) * 64 *
787 priv->cfg->base_params->num_of_queues;
788
789 if (!priv->txq) {
790 IWL_ERR(priv, "txq not ready\n");
791 return -EAGAIN;
792 }
793 buf = kzalloc(bufsz, GFP_KERNEL);
794 if (!buf)
795 return -ENOMEM;
796
797 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
798 txq = &priv->txq[cnt];
799 q = &txq->q;
800 pos += scnprintf(buf + pos, bufsz - pos,
801 "hwq %.2d: read=%u write=%u stop=%d"
802 " swq_id=%#.2x (ac %d/hwq %d)\n",
803 cnt, q->read_ptr, q->write_ptr,
804 !!test_bit(cnt, priv->queue_stopped),
805 txq->swq_id, txq->swq_id & 3,
806 (txq->swq_id >> 2) & 0x1f);
807 if (cnt >= 4)
808 continue;
809 /* for the ACs, display the stop count too */
810 pos += scnprintf(buf + pos, bufsz - pos,
811 " stop-count: %d\n",
812 atomic_read(&priv->queue_stop_count[cnt]));
813 }
814 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
815 kfree(buf);
816 return ret;
817}
818
819static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
820 char __user *user_buf,
821 size_t count, loff_t *ppos) {
822
823 struct iwl_priv *priv = file->private_data;
824 struct iwl_rx_queue *rxq = &priv->rxq;
825 char buf[256];
826 int pos = 0;
827 const size_t bufsz = sizeof(buf);
828
829 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
830 rxq->read);
831 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
832 rxq->write);
833 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
834 rxq->free_count);
835 if (rxq->rb_stts) {
836 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
837 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
838 } else {
839 pos += scnprintf(buf + pos, bufsz - pos,
840 "closed_rb_num: Not Allocated\n");
841 }
842 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
843}
844
845static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
846 char __user *user_buf,
847 size_t count, loff_t *ppos)
848{
849 struct iwl_priv *priv = file->private_data;
850 return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
851 user_buf, count, ppos);
852}
853
854static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
855 char __user *user_buf,
856 size_t count, loff_t *ppos)
857{
858 struct iwl_priv *priv = file->private_data;
859 return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
860 user_buf, count, ppos);
861}
862
863static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
864 char __user *user_buf,
865 size_t count, loff_t *ppos)
866{
867 struct iwl_priv *priv = file->private_data;
868 return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
869 user_buf, count, ppos);
870}
871
872static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
873 char __user *user_buf,
874 size_t count, loff_t *ppos) {
875
876 struct iwl_priv *priv = file->private_data;
877 int pos = 0;
878 int cnt = 0;
879 char *buf;
880 int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
881 ssize_t ret;
882 struct iwl_sensitivity_data *data;
883
884 data = &priv->sensitivity_data;
885 buf = kzalloc(bufsz, GFP_KERNEL);
886 if (!buf) {
887 IWL_ERR(priv, "Can not allocate Buffer\n");
888 return -ENOMEM;
889 }
890
891 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
892 data->auto_corr_ofdm);
893 pos += scnprintf(buf + pos, bufsz - pos,
894 "auto_corr_ofdm_mrc:\t\t %u\n",
895 data->auto_corr_ofdm_mrc);
896 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
897 data->auto_corr_ofdm_x1);
898 pos += scnprintf(buf + pos, bufsz - pos,
899 "auto_corr_ofdm_mrc_x1:\t\t %u\n",
900 data->auto_corr_ofdm_mrc_x1);
901 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
902 data->auto_corr_cck);
903 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
904 data->auto_corr_cck_mrc);
905 pos += scnprintf(buf + pos, bufsz - pos,
906 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
907 data->last_bad_plcp_cnt_ofdm);
908 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
909 data->last_fa_cnt_ofdm);
910 pos += scnprintf(buf + pos, bufsz - pos,
911 "last_bad_plcp_cnt_cck:\t\t %u\n",
912 data->last_bad_plcp_cnt_cck);
913 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
914 data->last_fa_cnt_cck);
915 pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
916 data->nrg_curr_state);
917 pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
918 data->nrg_prev_state);
919 pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
920 for (cnt = 0; cnt < 10; cnt++) {
921 pos += scnprintf(buf + pos, bufsz - pos, " %u",
922 data->nrg_value[cnt]);
923 }
924 pos += scnprintf(buf + pos, bufsz - pos, "\n");
925 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
926 for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
927 pos += scnprintf(buf + pos, bufsz - pos, " %u",
928 data->nrg_silence_rssi[cnt]);
929 }
930 pos += scnprintf(buf + pos, bufsz - pos, "\n");
931 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
932 data->nrg_silence_ref);
933 pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
934 data->nrg_energy_idx);
935 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
936 data->nrg_silence_idx);
937 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
938 data->nrg_th_cck);
939 pos += scnprintf(buf + pos, bufsz - pos,
940 "nrg_auto_corr_silence_diff:\t %u\n",
941 data->nrg_auto_corr_silence_diff);
942 pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
943 data->num_in_cck_no_fa);
944 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
945 data->nrg_th_ofdm);
946
947 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
948 kfree(buf);
949 return ret;
950}
951
952
953static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
954 char __user *user_buf,
955 size_t count, loff_t *ppos) {
956
957 struct iwl_priv *priv = file->private_data;
958 int pos = 0;
959 int cnt = 0;
960 char *buf;
961 int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
962 ssize_t ret;
963 struct iwl_chain_noise_data *data;
964
965 data = &priv->chain_noise_data;
966 buf = kzalloc(bufsz, GFP_KERNEL);
967 if (!buf) {
968 IWL_ERR(priv, "Can not allocate Buffer\n");
969 return -ENOMEM;
970 }
971
972 pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
973 data->active_chains);
974 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
975 data->chain_noise_a);
976 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
977 data->chain_noise_b);
978 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
979 data->chain_noise_c);
980 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
981 data->chain_signal_a);
982 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
983 data->chain_signal_b);
984 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
985 data->chain_signal_c);
986 pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
987 data->beacon_count);
988
989 pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
990 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
991 pos += scnprintf(buf + pos, bufsz - pos, " %u",
992 data->disconn_array[cnt]);
993 }
994 pos += scnprintf(buf + pos, bufsz - pos, "\n");
995 pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
996 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
997 pos += scnprintf(buf + pos, bufsz - pos, " %u",
998 data->delta_gain_code[cnt]);
999 }
1000 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1001 pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
1002 data->radio_write);
1003 pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
1004 data->state);
1005
1006 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1007 kfree(buf);
1008 return ret;
1009}
1010
1011static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
1012 char __user *user_buf,
1013 size_t count, loff_t *ppos)
1014{
1015 struct iwl_priv *priv = file->private_data;
1016 char buf[60];
1017 int pos = 0;
1018 const size_t bufsz = sizeof(buf);
1019 u32 pwrsave_status;
1020
1021 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1022 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1023
1024 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1025 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1026 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1027 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1028 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1029 "error");
1030
1031 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1032}
1033
1034static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
1035 const char __user *user_buf,
1036 size_t count, loff_t *ppos)
1037{
1038 struct iwl_priv *priv = file->private_data;
1039 char buf[8];
1040 int buf_size;
1041 int clear;
1042
1043 memset(buf, 0, sizeof(buf));
1044 buf_size = min(count, sizeof(buf) - 1);
1045 if (copy_from_user(buf, user_buf, buf_size))
1046 return -EFAULT;
1047 if (sscanf(buf, "%d", &clear) != 1)
1048 return -EFAULT;
1049
1050 /* make request to uCode to retrieve statistics information */
1051 mutex_lock(&priv->mutex);
1052 iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
1053 mutex_unlock(&priv->mutex);
1054
1055 return count;
1056}
1057
1058static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
1059 char __user *user_buf,
1060 size_t count, loff_t *ppos) {
1061
1062 struct iwl_priv *priv = file->private_data;
1063 int len = 0;
1064 char buf[20];
1065
1066 len = sprintf(buf, "0x%04X\n",
1067 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1068 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1069}
1070
1071static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
1072 char __user *user_buf,
1073 size_t count, loff_t *ppos) {
1074
1075 struct iwl_priv *priv = file->private_data;
1076 int len = 0;
1077 char buf[20];
1078
1079 len = sprintf(buf, "0x%04X\n",
1080 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1081 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1082}
1083
1084static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
1085 char __user *user_buf,
1086 size_t count, loff_t *ppos)
1087{
1088 struct iwl_priv *priv = file->private_data;
1089 char *buf;
1090 int pos = 0;
1091 ssize_t ret = -EFAULT;
1092
1093 if (priv->cfg->ops->lib->dump_fh) {
1094 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
1095 if (buf) {
1096 ret = simple_read_from_buffer(user_buf,
1097 count, ppos, buf, pos);
1098 kfree(buf);
1099 }
1100 }
1101
1102 return ret;
1103}
1104
1105static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
1106 char __user *user_buf,
1107 size_t count, loff_t *ppos) {
1108
1109 struct iwl_priv *priv = file->private_data;
1110 int pos = 0;
1111 char buf[12];
1112 const size_t bufsz = sizeof(buf);
1113
1114 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1115 priv->missed_beacon_threshold);
1116
1117 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1118}
1119
1120static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
1121 const char __user *user_buf,
1122 size_t count, loff_t *ppos)
1123{
1124 struct iwl_priv *priv = file->private_data;
1125 char buf[8];
1126 int buf_size;
1127 int missed;
1128
1129 memset(buf, 0, sizeof(buf));
1130 buf_size = min(count, sizeof(buf) - 1);
1131 if (copy_from_user(buf, user_buf, buf_size))
1132 return -EFAULT;
1133 if (sscanf(buf, "%d", &missed) != 1)
1134 return -EINVAL;
1135
1136 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
1137 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
1138 priv->missed_beacon_threshold =
1139 IWL_MISSED_BEACON_THRESHOLD_DEF;
1140 else
1141 priv->missed_beacon_threshold = missed;
1142
1143 return count;
1144}
1145
1146static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
1147 char __user *user_buf,
1148 size_t count, loff_t *ppos) {
1149
1150 struct iwl_priv *priv = file->private_data;
1151 int pos = 0;
1152 char buf[300];
1153 const size_t bufsz = sizeof(buf);
1154 struct iwl_force_reset *force_reset;
1155
1156 force_reset = &priv->force_reset;
1157
1158 pos += scnprintf(buf + pos, bufsz - pos,
1159 "\tnumber of reset request: %d\n",
1160 force_reset->reset_request_count);
1161 pos += scnprintf(buf + pos, bufsz - pos,
1162 "\tnumber of reset request success: %d\n",
1163 force_reset->reset_success_count);
1164 pos += scnprintf(buf + pos, bufsz - pos,
1165 "\tnumber of reset request reject: %d\n",
1166 force_reset->reset_reject_count);
1167 pos += scnprintf(buf + pos, bufsz - pos,
1168 "\treset duration: %lu\n",
1169 force_reset->reset_duration);
1170
1171 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1172}
1173
1174static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
1175 const char __user *user_buf,
1176 size_t count, loff_t *ppos) {
1177
1178 int ret;
1179 struct iwl_priv *priv = file->private_data;
1180
1181 ret = iwl_legacy_force_reset(priv, true);
1182
1183 return ret ? ret : count;
1184}
1185
1186static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
1187 const char __user *user_buf,
1188 size_t count, loff_t *ppos) {
1189
1190 struct iwl_priv *priv = file->private_data;
1191 char buf[8];
1192 int buf_size;
1193 int timeout;
1194
1195 memset(buf, 0, sizeof(buf));
1196 buf_size = min(count, sizeof(buf) - 1);
1197 if (copy_from_user(buf, user_buf, buf_size))
1198 return -EFAULT;
1199 if (sscanf(buf, "%d", &timeout) != 1)
1200 return -EINVAL;
1201 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
1202 timeout = IWL_DEF_WD_TIMEOUT;
1203
1204 priv->cfg->base_params->wd_timeout = timeout;
1205 iwl_legacy_setup_watchdog(priv);
1206 return count;
1207}
1208
1209DEBUGFS_READ_FILE_OPS(rx_statistics);
1210DEBUGFS_READ_FILE_OPS(tx_statistics);
1211DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
1212DEBUGFS_READ_FILE_OPS(rx_queue);
1213DEBUGFS_READ_FILE_OPS(tx_queue);
1214DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
1215DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
1216DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1217DEBUGFS_READ_FILE_OPS(sensitivity);
1218DEBUGFS_READ_FILE_OPS(chain_noise);
1219DEBUGFS_READ_FILE_OPS(power_save_status);
1220DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
1221DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
1222DEBUGFS_READ_FILE_OPS(fh_reg);
1223DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
1224DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1225DEBUGFS_READ_FILE_OPS(rxon_flags);
1226DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1227DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1228
1229/*
1230 * Create the debugfs files and directories
1231 *
1232 */
1233int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
1234{
1235 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
1236 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1237
1238 dir_drv = debugfs_create_dir(name, phyd);
1239 if (!dir_drv)
1240 return -ENOMEM;
1241
1242 priv->debugfs_dir = dir_drv;
1243
1244 dir_data = debugfs_create_dir("data", dir_drv);
1245 if (!dir_data)
1246 goto err;
1247 dir_rf = debugfs_create_dir("rf", dir_drv);
1248 if (!dir_rf)
1249 goto err;
1250 dir_debug = debugfs_create_dir("debug", dir_drv);
1251 if (!dir_debug)
1252 goto err;
1253
1254 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1255 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1256 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1257 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1258 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1259 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1260 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1261 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1262 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
1263 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
1264 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1265 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1266 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1267 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1268 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
1269 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
1270 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1271 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1272 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1273 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1274 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1275 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1276
1277 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1278 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1279 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1280 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1281 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IRUSR);
1282 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IRUSR);
1283 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1284 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1285 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1286 &priv->disable_sens_cal);
1287 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1288 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1289 &priv->disable_chain_noise_cal);
1290 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1291 &priv->disable_tx_power_cal);
1292 return 0;
1293
1294err:
1295 IWL_ERR(priv, "Can't create the debugfs directory\n");
1296 iwl_legacy_dbgfs_unregister(priv);
1297 return -ENOMEM;
1298}
1299EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
1300
1301/**
1302 * Remove the debugfs files and directories
1303 *
1304 */
1305void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
1306{
1307 if (!priv->debugfs_dir)
1308 return;
1309
1310 debugfs_remove_recursive(priv->debugfs_dir);
1311 priv->debugfs_dir = NULL;
1312}
1313EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
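
/*
 * Editor's usage sketch -- not part of the original file.  A probe/teardown
 * path would typically pair the two exported helpers above; the calling
 * functions and the "iwlegacy" directory name below are hypothetical.
 */
static void example_setup_debugfs(struct iwl_priv *priv)
{
	if (iwl_legacy_dbgfs_register(priv, "iwlegacy"))
		IWL_ERR(priv, "failed to create debugfs files\n");
}

static void example_teardown_debugfs(struct iwl_priv *priv)
{
	iwl_legacy_dbgfs_unregister(priv);
}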
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
new file mode 100644
index 00000000000..9c786edf56f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -0,0 +1,1364 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_legacy_dev_h__
33#define __iwl_legacy_dev_h__
34
35#include <linux/interrupt.h>
36#include <linux/pci.h> /* for struct pci_device_id */
37#include <linux/kernel.h>
38#include <linux/leds.h>
39#include <linux/wait.h>
40#include <net/ieee80211_radiotap.h>
41
42#include "iwl-eeprom.h"
43#include "iwl-csr.h"
44#include "iwl-prph.h"
45#include "iwl-fh.h"
46#include "iwl-debug.h"
47#include "iwl-4965-hw.h"
48#include "iwl-3945-hw.h"
49#include "iwl-led.h"
50#include "iwl-power.h"
51#include "iwl-legacy-rs.h"
52
53struct iwl_tx_queue;
54
55/* CT-KILL constants */
56#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
57
58/* Default noise level to report when noise measurement is not available.
59 * This may be because we're:
60 * 1) Not associated (4965, no beacon statistics being sent to driver)
61 * 2) Scanning (noise measurement does not apply to associated channel)
62 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
63 * Use default noise value of -127 ... this is below the range of measurable
64 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
65 * Also, -127 works better than 0 when averaging frames with/without
66 * noise info (e.g. averaging might be done in app); measured dBm values are
67 * always negative ... using a negative value as the default keeps all
68 * averages within an s8's (used in some apps) range of negative values. */
69#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
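
/*
 * Editor's illustrative sketch -- not part of the original header.  How a
 * default like this is typically applied when a frame carries no noise
 * measurement; the helper and its arguments are hypothetical.
 */
static inline s8 example_reported_noise(bool measured, s8 noise_dbm)
{
	/* fall back to the "unmeasurable" marker rather than 0 so that
	 * averaging with real (negative) dBm samples stays meaningful */
	return measured ? noise_dbm : IWL_NOISE_MEAS_NOT_AVAILABLE;
}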
70
71/*
72 * RTS threshold here is total size [2347] minus 4 FCS bytes
73 * Per spec:
74 * a value of 0 means RTS on all data/management packets
75 * a value > max MSDU size means no RTS
76 * else RTS for data/management frames where MPDU is larger
77 * than RTS value.
78 */
79#define DEFAULT_RTS_THRESHOLD 2347U
80#define MIN_RTS_THRESHOLD 0U
81#define MAX_RTS_THRESHOLD 2347U
82#define MAX_MSDU_SIZE 2304U
83#define MAX_MPDU_SIZE 2346U
84#define DEFAULT_BEACON_INTERVAL 100U
85#define DEFAULT_SHORT_RETRY_LIMIT 7U
86#define DEFAULT_LONG_RETRY_LIMIT 4U
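
/*
 * Editor's illustrative sketch -- not part of the original header.  The RTS
 * rule described in the comment above, written as a predicate; the helper
 * name and parameters are hypothetical.
 */
static inline bool example_needs_rts(u32 rts_threshold, u32 mpdu_len)
{
	if (rts_threshold == 0)
		return true;			/* RTS for all data/mgmt */
	if (rts_threshold > MAX_MSDU_SIZE)
		return false;			/* RTS disabled */
	return mpdu_len > rts_threshold;	/* RTS only for large MPDUs */
}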
87
88struct iwl_rx_mem_buffer {
89 dma_addr_t page_dma;
90 struct page *page;
91 struct list_head list;
92};
93
94#define rxb_addr(r) page_address(r->page)
95
96/* defined below */
97struct iwl_device_cmd;
98
99struct iwl_cmd_meta {
100 /* only for SYNC commands, iff the reply skb is wanted */
101 struct iwl_host_cmd *source;
102 /*
103 * only for ASYNC commands
104 * (which is somewhat stupid -- look at iwl-sta.c for instance
105 * which duplicates a bunch of code because the callback isn't
106 * invoked for SYNC commands, if it were and its result passed
107 * through it would be simpler...)
108 */
109 void (*callback)(struct iwl_priv *priv,
110 struct iwl_device_cmd *cmd,
111 struct iwl_rx_packet *pkt);
112
113 /* The CMD_SIZE_HUGE flag bit indicates that the command
114 * structure is stored at the end of the shared queue memory. */
115 u32 flags;
116
117 DEFINE_DMA_UNMAP_ADDR(mapping);
118 DEFINE_DMA_UNMAP_LEN(len);
119};
120
121/*
122 * Generic queue structure
123 *
124 * Contains common data for Rx and Tx queues
125 */
126struct iwl_queue {
127 int n_bd; /* number of BDs in this queue */
128 int write_ptr; /* first empty entry (index) host_w */
129 int read_ptr; /* last used entry (index) host_r */
130 /* use for monitoring and recovering the stuck queue */
131 dma_addr_t dma_addr; /* physical addr for BD's */
132 int n_window; /* safe queue window */
133 u32 id;
134 int low_mark; /* low watermark, resume queue if free
135 * space more than this */
136 int high_mark; /* high watermark, stop queue if free
137 * space less than this */
138};
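
/*
 * Editor's illustrative sketch -- not part of the original header.  A
 * simplified model of how free space relates to the pointers above: the
 * queue resumes when free space rises above low_mark and stops when it
 * drops below high_mark.  The real helper, iwl_legacy_queue_space(), is
 * declared further down in this header and also reserves guard slots.
 */
static inline int example_queue_free_space(const struct iwl_queue *q)
{
	int free = q->read_ptr - q->write_ptr;

	if (free <= 0)
		free += q->n_bd;	/* wrap around the circular buffer */
	return free;
}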
139
140/* One for each TFD */
141struct iwl_tx_info {
142 struct sk_buff *skb;
143 struct iwl_rxon_context *ctx;
144};
145
146/**
147 * struct iwl_tx_queue - Tx Queue for DMA
148 * @q: generic Rx/Tx queue descriptor
149 * @bd: base of circular buffer of TFDs
150 * @cmd: array of command/TX buffer pointers
151 * @meta: array of meta data for each command/tx buffer
152 * @dma_addr_cmd: physical address of cmd/tx buffer array
153 * @txb: array of per-TFD driver data
154 * @time_stamp: time (in jiffies) of last read_ptr change
155 * @need_update: indicates need to update read/write index
156 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
157 *
158 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
159 * descriptors) and required locking structures.
160 */
161#define TFD_TX_CMD_SLOTS 256
162#define TFD_CMD_SLOTS 32
163
164struct iwl_tx_queue {
165 struct iwl_queue q;
166 void *tfds;
167 struct iwl_device_cmd **cmd;
168 struct iwl_cmd_meta *meta;
169 struct iwl_tx_info *txb;
170 unsigned long time_stamp;
171 u8 need_update;
172 u8 sched_retry;
173 u8 active;
174 u8 swq_id;
175};
176
177#define IWL_NUM_SCAN_RATES (2)
178
179struct iwl4965_channel_tgd_info {
180 u8 type;
181 s8 max_power;
182};
183
184struct iwl4965_channel_tgh_info {
185 s64 last_radar_time;
186};
187
188#define IWL4965_MAX_RATE (33)
189
190struct iwl3945_clip_group {
191 /* maximum power level to prevent clipping for each rate, derived by
192 * us from this band's saturation power in EEPROM */
193 const s8 clip_powers[IWL_MAX_RATES];
194};
195
196/* current Tx power values to use, one for each rate for each channel.
197 * requested power is limited by:
198 * -- regulatory EEPROM limits for this channel
199 * -- hardware capabilities (clip-powers)
200 * -- spectrum management
201 * -- user preference (e.g. iwconfig)
202 * when requested power is set, base power index must also be set. */
203struct iwl3945_channel_power_info {
204 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
205 s8 power_table_index; /* actual (compensated) index into gain table */
206 s8 base_power_index; /* gain index for power at factory temp. */
207 s8 requested_power; /* power (dBm) requested for this chnl/rate */
208};
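
/*
 * Editor's illustrative sketch -- not part of the original header.  The
 * comment above says the requested power is bounded by several independent
 * limits; effectively it is their minimum.  All names here are hypothetical.
 */
static inline s8 example_requested_power(s8 regulatory_dbm, s8 clip_dbm,
					 s8 spectrum_dbm, s8 user_dbm)
{
	s8 limit = min(regulatory_dbm, clip_dbm);

	limit = min(limit, spectrum_dbm);
	return min(limit, user_dbm);
}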
209
210/* current scan Tx power values to use, one for each scan rate for each
211 * channel. */
212struct iwl3945_scan_power_info {
213 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
214 s8 power_table_index; /* actual (compensated) index into gain table */
215 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
216};
217
218/*
219 * One for each channel, holds all channel setup data
220 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
221 * with one another!
222 */
223struct iwl_channel_info {
224 struct iwl4965_channel_tgd_info tgd;
225 struct iwl4965_channel_tgh_info tgh;
226 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
227 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
228 * HT40 channel */
229
230 u8 channel; /* channel number */
231 u8 flags; /* flags copied from EEPROM */
232 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
233 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
234 s8 min_power; /* always 0 */
235 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
236
237 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
238 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
239 enum ieee80211_band band;
240
241 /* HT40 channel info */
242 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
243 u8 ht40_flags; /* flags copied from EEPROM */
244 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
245
246 /* Radio/DSP gain settings for each "normal" data Tx rate.
247 * These include, in addition to RF and DSP gain, a few fields for
248 * remembering/modifying gain settings (indexes). */
249 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
250
251 /* Radio/DSP gain settings for each scan rate, for directed scans. */
252 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
253};
254
255#define IWL_TX_FIFO_BK 0 /* shared */
256#define IWL_TX_FIFO_BE 1
257#define IWL_TX_FIFO_VI 2 /* shared */
258#define IWL_TX_FIFO_VO 3
259#define IWL_TX_FIFO_UNUSED -1
260
261/* Minimum number of queues. MAX_NUM is defined in hw specific files.
262 * Set the minimum to accommodate the 4 standard TX queues, 1 command
263 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
264#define IWL_MIN_NUM_QUEUES 10
265
266#define IWL_DEFAULT_CMD_QUEUE_NUM 4
267
268#define IEEE80211_DATA_LEN 2304
269#define IEEE80211_4ADDR_LEN 30
270#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
271#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
272
273struct iwl_frame {
274 union {
275 struct ieee80211_hdr frame;
276 struct iwl_tx_beacon_cmd beacon;
277 u8 raw[IEEE80211_FRAME_LEN];
278 u8 cmd[360];
279 } u;
280 struct list_head list;
281};
282
283#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
284#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
285#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
286
287enum {
288 CMD_SYNC = 0,
289 CMD_SIZE_NORMAL = 0,
290 CMD_NO_SKB = 0,
291 CMD_SIZE_HUGE = (1 << 0),
292 CMD_ASYNC = (1 << 1),
293 CMD_WANT_SKB = (1 << 2),
294 CMD_MAPPED = (1 << 3),
295};
296
297#define DEF_CMD_PAYLOAD_SIZE 320
298
299/**
300 * struct iwl_device_cmd
301 *
302 * For allocation of the command and tx queues, this establishes the overall
303 * size of the largest command we send to uCode, except for a scan command
304 * (which is relatively huge; space is allocated separately).
305 */
306struct iwl_device_cmd {
307 struct iwl_cmd_header hdr; /* uCode API */
308 union {
309 u32 flags;
310 u8 val8;
311 u16 val16;
312 u32 val32;
313 struct iwl_tx_cmd tx;
314 u8 payload[DEF_CMD_PAYLOAD_SIZE];
315 } __packed cmd;
316} __packed;
317
318#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
319
320
321struct iwl_host_cmd {
322 const void *data;
323 unsigned long reply_page;
324 void (*callback)(struct iwl_priv *priv,
325 struct iwl_device_cmd *cmd,
326 struct iwl_rx_packet *pkt);
327 u32 flags;
328 u16 len;
329 u8 id;
330};
331
332#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
333#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
334#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
335
336/**
337 * struct iwl_rx_queue - Rx queue
338 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
339 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
340 * @read: Shared index to newest available Rx buffer
341 * @write: Shared index to oldest written Rx packet
342 * @free_count: Number of pre-allocated buffers in rx_free
343 * @rx_free: list of free SKBs for use
344 * @rx_used: List of Rx buffers with no SKB
345 * @need_update: flag to indicate we need to update read/write index
346 * @rb_stts: driver's pointer to receive buffer status
347 * @rb_stts_dma: bus address of receive buffer status
348 *
349 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
350 */
351struct iwl_rx_queue {
352 __le32 *bd;
353 dma_addr_t bd_dma;
354 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
355 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
356 u32 read;
357 u32 write;
358 u32 free_count;
359 u32 write_actual;
360 struct list_head rx_free;
361 struct list_head rx_used;
362 int need_update;
363 struct iwl_rb_status *rb_stts;
364 dma_addr_t rb_stts_dma;
365 spinlock_t lock;
366};
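
/*
 * Editor's illustrative sketch -- not part of the original header.  As the
 * note above says, rx_free/rx_used act as a FIFO of iwl_rx_mem_buffers; a
 * replenish step conceptually moves a buffer back onto rx_free once a fresh
 * page has been attached.  Error handling and page mapping are omitted.
 */
static inline void example_recycle_rxb(struct iwl_rx_queue *rxq,
				       struct iwl_rx_mem_buffer *rxb)
{
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	list_del(&rxb->list);			/* off rx_used */
	list_add_tail(&rxb->list, &rxq->rx_free);
	rxq->free_count++;
	spin_unlock_irqrestore(&rxq->lock, flags);
}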
367
368#define IWL_SUPPORTED_RATES_IE_LEN 8
369
370#define MAX_TID_COUNT 9
371
372#define IWL_INVALID_RATE 0xFF
373#define IWL_INVALID_VALUE -1
374
375/**
376 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
377 * @txq_id: Tx queue used for Tx attempt
378 * @frame_count: # frames attempted by Tx command
379 * @wait_for_ba: Expect block-ack before next Tx reply
380 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
381 * @bitmap: bitmap of frames pending ACK in the Tx window, one bit per
382 * frame (a single 64-bit field covering both low and high order halves)
383 * @rate_n_flags: Rate at which Tx was attempted
384 *
385 * If REPLY_TX indicates that aggregation was attempted, driver must wait
386 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
387 * until block ack arrives.
388 */
389struct iwl_ht_agg {
390 u16 txq_id;
391 u16 frame_count;
392 u16 wait_for_ba;
393 u16 start_idx;
394 u64 bitmap;
395 u32 rate_n_flags;
396#define IWL_AGG_OFF 0
397#define IWL_AGG_ON 1
398#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
399#define IWL_EMPTYING_HW_QUEUE_DELBA 3
400 u8 state;
401};
402
403
404struct iwl_tid_data {
405 u16 seq_number; /* 4965 only */
406 u16 tfds_in_queue;
407 struct iwl_ht_agg agg;
408};
409
410struct iwl_hw_key {
411 u32 cipher;
412 int keylen;
413 u8 keyidx;
414 u8 key[32];
415};
416
417union iwl_ht_rate_supp {
418 u16 rates;
419 struct {
420 u8 siso_rate;
421 u8 mimo_rate;
422 };
423};
424
425#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
426#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
427#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
428#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
429#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
430#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
431#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
432
433/*
434 * Maximal MPDU density for TX aggregation
435 * 4 - 2us density
436 * 5 - 4us density
437 * 6 - 8us density
438 * 7 - 16us density
439 */
440#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
441#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
442#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
443#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
444#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
445#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
446#define CFG_HT_MPDU_DENSITY_MIN (0x1)
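
/*
 * Editor's illustrative sketch -- not part of the original header.  For the
 * density codes listed above (0x4..0x7) the spacing works out to
 * 2^(code - 3) microseconds, e.g. CFG_HT_MPDU_DENSITY_4USEC -> 4 usec.
 */
static inline u32 example_mpdu_density_usec(u8 density_code)
{
	/* only meaningful for codes >= 0x3; smaller codes encode
	 * sub-microsecond densities in the 802.11n tables */
	return 1U << (density_code - 3);
}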
447
448struct iwl_ht_config {
449 bool single_chain_sufficient;
450 enum ieee80211_smps_mode smps; /* current smps mode */
451};
452
453/* QoS structures */
454struct iwl_qos_info {
455 int qos_active;
456 struct iwl_qosparam_cmd def_qos_parm;
457};
458
459/*
460 * Structure should be accessed with sta_lock held. When station addition
461 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
462 * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
463 * sta_lock held.
464 */
465struct iwl_station_entry {
466 struct iwl_legacy_addsta_cmd sta;
467 struct iwl_tid_data tid[MAX_TID_COUNT];
468 u8 used, ctxid;
469 struct iwl_hw_key keyinfo;
470 struct iwl_link_quality_cmd *lq;
471};
472
473struct iwl_station_priv_common {
474 struct iwl_rxon_context *ctx;
475 u8 sta_id;
476};
477
478/*
479 * iwl_station_priv: Driver's private station information
480 *
481 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
482 * in the structure for use by driver. This structure is placed in that
483 * space.
484 *
485 * The common struct MUST be first because it is shared between
486 * 3945 and 4965!
487 */
488struct iwl_station_priv {
489 struct iwl_station_priv_common common;
490 struct iwl_lq_sta lq_sta;
491 atomic_t pending_frames;
492 bool client;
493 bool asleep;
494};
495
496/**
497 * struct iwl_vif_priv - driver's private per-interface information
498 *
499 * When mac80211 allocates a virtual interface, it can allocate
500 * space for us to put data into.
501 */
502struct iwl_vif_priv {
503 struct iwl_rxon_context *ctx;
504 u8 ibss_bssid_sta_id;
505};
506
507/* one for each uCode image (inst/data, boot/init/runtime) */
508struct fw_desc {
509 void *v_addr; /* access by driver */
510 dma_addr_t p_addr; /* access by card's busmaster DMA */
511 u32 len; /* bytes */
512};
513
514/* uCode file layout */
515struct iwl_ucode_header {
516 __le32 ver; /* major/minor/API/serial */
517 struct {
518 __le32 inst_size; /* bytes of runtime code */
519 __le32 data_size; /* bytes of runtime data */
520 __le32 init_size; /* bytes of init code */
521 __le32 init_data_size; /* bytes of init data */
522 __le32 boot_size; /* bytes of bootstrap code */
523 u8 data[0]; /* in same order as sizes */
524 } v1;
525};
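
/*
 * Editor's illustrative sketch -- not part of the original header.  With the
 * v1 layout above, the images follow one another inside data[] in the same
 * order as the size fields, so offsets can be derived from the sizes; the
 * helper below (hypothetical name) computes where the init image starts.
 */
static inline u32 example_ucode_init_offset(const struct iwl_ucode_header *hdr)
{
	/* init code follows the runtime inst and runtime data images */
	return le32_to_cpu(hdr->v1.inst_size) +
	       le32_to_cpu(hdr->v1.data_size);
}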
526
527struct iwl4965_ibss_seq {
528 u8 mac[ETH_ALEN];
529 u16 seq_num;
530 u16 frag_num;
531 unsigned long packet_time;
532 struct list_head list;
533};
534
535struct iwl_sensitivity_ranges {
536 u16 min_nrg_cck;
537 u16 max_nrg_cck;
538
539 u16 nrg_th_cck;
540 u16 nrg_th_ofdm;
541
542 u16 auto_corr_min_ofdm;
543 u16 auto_corr_min_ofdm_mrc;
544 u16 auto_corr_min_ofdm_x1;
545 u16 auto_corr_min_ofdm_mrc_x1;
546
547 u16 auto_corr_max_ofdm;
548 u16 auto_corr_max_ofdm_mrc;
549 u16 auto_corr_max_ofdm_x1;
550 u16 auto_corr_max_ofdm_mrc_x1;
551
552 u16 auto_corr_max_cck;
553 u16 auto_corr_max_cck_mrc;
554 u16 auto_corr_min_cck;
555 u16 auto_corr_min_cck_mrc;
556
557 u16 barker_corr_th_min;
558 u16 barker_corr_th_min_mrc;
559 u16 nrg_th_cca;
560};
561
562
563#define KELVIN_TO_CELSIUS(x) ((x)-273)
564#define CELSIUS_TO_KELVIN(x) ((x)+273)
565
566
567/**
568 * struct iwl_hw_params
569 * @max_txq_num: Max # Tx queues supported
570 * @dma_chnl_num: Number of Tx DMA/FIFO channels
571 * @scd_bc_tbls_size: size of scheduler byte count tables
572 * @tfd_size: TFD size
573 * @tx/rx_chains_num: Number of TX/RX chains
574 * @valid_tx/rx_ant: usable antennas
575 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
576 * @max_rxq_log: Log-base-2 of max_rxq_size
577 * @rx_page_order: Rx buffer page order
578 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
579 * @max_stations: max number of stations supported
580 * @ht40_channel: is 40MHz channel width possible, per band:
581 * BIT(IEEE80211_BAND_2GHZ) BIT(IEEE80211_BAND_5GHZ)
582 * @sw_crypto: 0 for hw, 1 for sw
583 * @max_xxx_size: for ucode uses
584 * @ct_kill_threshold: temperature threshold
585 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
586 * @struct iwl_sensitivity_ranges: range of sensitivity values
587 */
588struct iwl_hw_params {
589 u8 max_txq_num;
590 u8 dma_chnl_num;
591 u16 scd_bc_tbls_size;
592 u32 tfd_size;
593 u8 tx_chains_num;
594 u8 rx_chains_num;
595 u8 valid_tx_ant;
596 u8 valid_rx_ant;
597 u16 max_rxq_size;
598 u16 max_rxq_log;
599 u32 rx_page_order;
600 u32 rx_wrt_ptr_reg;
601 u8 max_stations;
602 u8 ht40_channel;
603 u8 max_beacon_itrvl; /* in 1024 ms */
604 u32 max_inst_size;
605 u32 max_data_size;
606 u32 max_bsm_size;
607 u32 ct_kill_threshold; /* value in hw-dependent units */
608 u16 beacon_time_tsf_bits;
609 const struct iwl_sensitivity_ranges *sens;
610};
611
612
613/******************************************************************************
614 *
615 * Functions implemented in core module which are forward declared here
616 * for use by iwl-[4-5].c
617 *
618 * NOTE: The implementation of these functions are not hardware specific
619 * which is why they are in the core module files.
620 *
621 * Naming convention --
622 * iwl_ <-- Is part of iwlwifi
623 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
624 * iwl4965_bg_ <-- Called from work queue context
625 * iwl4965_mac_ <-- mac80211 callback
626 *
627 ****************************************************************************/
628extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
629extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
630extern int iwl_legacy_queue_space(const struct iwl_queue *q);
631static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
632{
633 return q->write_ptr >= q->read_ptr ?
634 (i >= q->read_ptr && i < q->write_ptr) :
635 !(i < q->read_ptr && i >= q->write_ptr);
636}
637
638
639static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
640 int is_huge)
641{
642 /*
643 * This is for init calibration result and scan command which
644 * required buffer > TFD_MAX_PAYLOAD_SIZE,
645 * the big buffer at end of command array
646 */
647 if (is_huge)
648 return q->n_window; /* must be power of 2 */
649
650 /* Otherwise, use normal size buffers */
651 return index & (q->n_window - 1);
652}
653
654
655struct iwl_dma_ptr {
656 dma_addr_t dma;
657 void *addr;
658 size_t size;
659};
660
661#define IWL_OPERATION_MODE_AUTO 0
662#define IWL_OPERATION_MODE_HT_ONLY 1
663#define IWL_OPERATION_MODE_MIXED 2
664#define IWL_OPERATION_MODE_20MHZ 3
665
666#define IWL_TX_CRC_SIZE 4
667#define IWL_TX_DELIMITER_SIZE 4
668
669#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
670
671/* Sensitivity and chain noise calibration */
672#define INITIALIZATION_VALUE 0xFFFF
673#define IWL4965_CAL_NUM_BEACONS 20
674#define IWL_CAL_NUM_BEACONS 16
675#define MAXIMUM_ALLOWED_PATHLOSS 15
676
677#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
678
679#define MAX_FA_OFDM 50
680#define MIN_FA_OFDM 5
681#define MAX_FA_CCK 50
682#define MIN_FA_CCK 5
683
684#define AUTO_CORR_STEP_OFDM 1
685
686#define AUTO_CORR_STEP_CCK 3
687#define AUTO_CORR_MAX_TH_CCK 160
688
689#define NRG_DIFF 2
690#define NRG_STEP_CCK 2
691#define NRG_MARGIN 8
692#define MAX_NUMBER_CCK_NO_FA 100
693
694#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
695
696#define CHAIN_A 0
697#define CHAIN_B 1
698#define CHAIN_C 2
699#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
700#define ALL_BAND_FILTER 0xFF00
701#define IN_BAND_FILTER 0xFF
702#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
703
704#define NRG_NUM_PREV_STAT_L 20
705#define NUM_RX_CHAINS 3
706
707enum iwl4965_false_alarm_state {
708 IWL_FA_TOO_MANY = 0,
709 IWL_FA_TOO_FEW = 1,
710 IWL_FA_GOOD_RANGE = 2,
711};
712
713enum iwl4965_chain_noise_state {
714 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
715 IWL_CHAIN_NOISE_ACCUMULATE,
716 IWL_CHAIN_NOISE_CALIBRATED,
717 IWL_CHAIN_NOISE_DONE,
718};
719
720enum iwl4965_calib_enabled_state {
721 IWL_CALIB_DISABLED = 0, /* must be 0 */
722 IWL_CALIB_ENABLED = 1,
723};
724
725/*
726 * enum iwl_calib
727 * defines the order in which results of initial calibrations
728 * should be sent to the runtime uCode
729 */
730enum iwl_calib {
731 IWL_CALIB_MAX,
732};
733
734/* Opaque calibration results */
735struct iwl_calib_result {
736 void *buf;
737 size_t buf_len;
738};
739
740enum ucode_type {
741 UCODE_NONE = 0,
742 UCODE_INIT,
743 UCODE_RT
744};
745
746/* Sensitivity calib data */
747struct iwl_sensitivity_data {
748 u32 auto_corr_ofdm;
749 u32 auto_corr_ofdm_mrc;
750 u32 auto_corr_ofdm_x1;
751 u32 auto_corr_ofdm_mrc_x1;
752 u32 auto_corr_cck;
753 u32 auto_corr_cck_mrc;
754
755 u32 last_bad_plcp_cnt_ofdm;
756 u32 last_fa_cnt_ofdm;
757 u32 last_bad_plcp_cnt_cck;
758 u32 last_fa_cnt_cck;
759
760 u32 nrg_curr_state;
761 u32 nrg_prev_state;
762 u32 nrg_value[10];
763 u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
764 u32 nrg_silence_ref;
765 u32 nrg_energy_idx;
766 u32 nrg_silence_idx;
767 u32 nrg_th_cck;
768 s32 nrg_auto_corr_silence_diff;
769 u32 num_in_cck_no_fa;
770 u32 nrg_th_ofdm;
771
772 u16 barker_corr_th_min;
773 u16 barker_corr_th_min_mrc;
774 u16 nrg_th_cca;
775};
776
777/* Chain noise (differential Rx gain) calib data */
778struct iwl_chain_noise_data {
779 u32 active_chains;
780 u32 chain_noise_a;
781 u32 chain_noise_b;
782 u32 chain_noise_c;
783 u32 chain_signal_a;
784 u32 chain_signal_b;
785 u32 chain_signal_c;
786 u16 beacon_count;
787 u8 disconn_array[NUM_RX_CHAINS];
788 u8 delta_gain_code[NUM_RX_CHAINS];
789 u8 radio_write;
790 u8 state;
791};
792
793#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
794#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
795
796#define IWL_TRAFFIC_ENTRIES (256)
797#define IWL_TRAFFIC_ENTRY_SIZE (64)
798
799enum {
800 MEASUREMENT_READY = (1 << 0),
801 MEASUREMENT_ACTIVE = (1 << 1),
802};
803
804/* interrupt statistics */
805struct isr_statistics {
806 u32 hw;
807 u32 sw;
808 u32 err_code;
809 u32 sch;
810 u32 alive;
811 u32 rfkill;
812 u32 ctkill;
813 u32 wakeup;
814 u32 rx;
815 u32 rx_handlers[REPLY_MAX];
816 u32 tx;
817 u32 unhandled;
818};
819
820/* management statistics */
821enum iwl_mgmt_stats {
822 MANAGEMENT_ASSOC_REQ = 0,
823 MANAGEMENT_ASSOC_RESP,
824 MANAGEMENT_REASSOC_REQ,
825 MANAGEMENT_REASSOC_RESP,
826 MANAGEMENT_PROBE_REQ,
827 MANAGEMENT_PROBE_RESP,
828 MANAGEMENT_BEACON,
829 MANAGEMENT_ATIM,
830 MANAGEMENT_DISASSOC,
831 MANAGEMENT_AUTH,
832 MANAGEMENT_DEAUTH,
833 MANAGEMENT_ACTION,
834 MANAGEMENT_MAX,
835};
836/* control statistics */
837enum iwl_ctrl_stats {
838 CONTROL_BACK_REQ = 0,
839 CONTROL_BACK,
840 CONTROL_PSPOLL,
841 CONTROL_RTS,
842 CONTROL_CTS,
843 CONTROL_ACK,
844 CONTROL_CFEND,
845 CONTROL_CFENDACK,
846 CONTROL_MAX,
847};
848
849struct traffic_stats {
850#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
851 u32 mgmt[MANAGEMENT_MAX];
852 u32 ctrl[CONTROL_MAX];
853 u32 data_cnt;
854 u64 data_bytes;
855#endif
856};
857
858/*
859 * host interrupt timeout value
860 * used with setting interrupt coalescing timer
861 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
862 *
863 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
864 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
865 */
866#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
867#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
868#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
869#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
870#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
871#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
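
/*
 * Editor's illustrative sketch -- not part of the original header.  The
 * CSR_INT_COALESCING register counts in 32-usec units, so the defaults above
 * work out to 0x40 * 32 = 2048 usec and 0x10 * 32 = 512 usec.
 */
static inline u32 example_coalesce_to_usec(u32 reg_units)
{
	return reg_units * 32;
}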
872
873#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
874
875/* TX queue watchdog timeouts in mSecs */
876#define IWL_DEF_WD_TIMEOUT (2000)
877#define IWL_LONG_WD_TIMEOUT (10000)
878#define IWL_MAX_WD_TIMEOUT (120000)
879
880struct iwl_force_reset {
881 int reset_request_count;
882 int reset_success_count;
883 int reset_reject_count;
884 unsigned long reset_duration;
885 unsigned long last_force_reset_jiffies;
886};
887
888/* extend beacon time format bit shifting */
889/*
890 * for _3945 devices
891 * bits 31:24 - extended
892 * bits 23:0 - interval
893 */
894#define IWL3945_EXT_BEACON_TIME_POS 24
895/*
896 * for _4965 devices
897 * bits 31:22 - extended
898 * bits 21:0 - interval
899 */
900#define IWL4965_EXT_BEACON_TIME_POS 22
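
/*
 * Editor's illustrative sketch -- not part of the original header.  Packing
 * an extended beacon time word for a _4965 device: the extended part goes in
 * the upper bits and the interval in the low 22 bits.  Hypothetical helper.
 */
static inline u32 example_4965_ext_beacon_time(u32 extended, u32 interval)
{
	u32 interval_mask = (1U << IWL4965_EXT_BEACON_TIME_POS) - 1;

	return (extended << IWL4965_EXT_BEACON_TIME_POS) |
	       (interval & interval_mask);
}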
901
902enum iwl_rxon_context_id {
903 IWL_RXON_CTX_BSS,
904
905 NUM_IWL_RXON_CTX
906};
907
908struct iwl_rxon_context {
909 struct ieee80211_vif *vif;
910
911 const u8 *ac_to_fifo;
912 const u8 *ac_to_queue;
913 u8 mcast_queue;
914
915 /*
916 * We could use the vif to indicate active, but we
917 * also need it to be active during disabling when
918 * we already removed the vif for type setting.
919 */
920 bool always_active, is_active;
921
922 bool ht_need_multiple_chains;
923
924 enum iwl_rxon_context_id ctxid;
925
926 u32 interface_modes, exclusive_interface_modes;
927 u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
928
929 /*
930 * We declare this const so it can only be
931 * changed via explicit cast within the
932 * routines that actually update the physical
933 * hardware.
934 */
935 const struct iwl_legacy_rxon_cmd active;
936 struct iwl_legacy_rxon_cmd staging;
937
938 struct iwl_rxon_time_cmd timing;
939
940 struct iwl_qos_info qos_data;
941
942 u8 bcast_sta_id, ap_sta_id;
943
944 u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
945 u8 qos_cmd;
946 u8 wep_key_cmd;
947
948 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
949 u8 key_mapping_keys;
950
951 __le32 station_flags;
952
953 struct {
954 bool non_gf_sta_present;
955 u8 protection;
956 bool enabled, is_40mhz;
957 u8 extension_chan_offset;
958 } ht;
959};
960
961struct iwl_priv {
962
963 /* ieee device used by generic ieee processing code */
964 struct ieee80211_hw *hw;
965 struct ieee80211_channel *ieee_channels;
966 struct ieee80211_rate *ieee_rates;
967 struct iwl_cfg *cfg;
968
969 /* temporary frame storage list */
970 struct list_head free_frames;
971 int frames_count;
972
973 enum ieee80211_band band;
974 int alloc_rxb_page;
975
976 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
977 struct iwl_rx_mem_buffer *rxb);
978
979 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
980
981 /* spectrum measurement report caching */
982 struct iwl_spectrum_notification measure_report;
983 u8 measurement_status;
984
985 /* ucode beacon time */
986 u32 ucode_beacon_time;
987 int missed_beacon_threshold;
988
989 /* track IBSS manager (last beacon) status */
990 u32 ibss_manager;
991
992 /* force reset */
993 struct iwl_force_reset force_reset;
994
995	/* we allocate an array of iwl_channel_info for the NIC's valid
996	 * channels.  Access via channel # using the indirect index array */
997 struct iwl_channel_info *channel_info; /* channel info array */
998 u8 channel_count; /* # of channels */
999
1000 /* thermal calibration */
1001 s32 temperature; /* degrees Kelvin */
1002 s32 last_temperature;
1003
1004 /* init calibration results */
1005 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1006
1007 /* Scan related variables */
1008 unsigned long scan_start;
1009 unsigned long scan_start_tsf;
1010 void *scan_cmd;
1011 enum ieee80211_band scan_band;
1012 struct cfg80211_scan_request *scan_request;
1013 struct ieee80211_vif *scan_vif;
1014 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1015 u8 mgmt_tx_ant;
1016
1017 /* spinlock */
1018 spinlock_t lock; /* protect general shared data */
1019 spinlock_t hcmd_lock; /* protect hcmd */
1020 spinlock_t reg_lock; /* protect hw register access */
1021 struct mutex mutex;
1022
1023 /* basic pci-network driver stuff */
1024 struct pci_dev *pci_dev;
1025
1026 /* pci hardware address support */
1027 void __iomem *hw_base;
1028 u32 hw_rev;
1029 u32 hw_wa_rev;
1030 u8 rev_id;
1031
1032 /* microcode/device supports multiple contexts */
1033 u8 valid_contexts;
1034
1035 /* command queue number */
1036 u8 cmd_queue;
1037
1038 /* max number of station keys */
1039 u8 sta_key_max_num;
1040
1041 /* EEPROM MAC addresses */
1042 struct mac_address addresses[1];
1043
1044 /* uCode images, save to reload in case of failure */
1045 int fw_index; /* firmware we're trying to load */
1046 u32 ucode_ver; /* version of ucode, copy of
1047 iwl_ucode.ver */
1048 struct fw_desc ucode_code; /* runtime inst */
1049 struct fw_desc ucode_data; /* runtime data original */
1050 struct fw_desc ucode_data_backup; /* runtime data save/restore */
1051 struct fw_desc ucode_init; /* initialization inst */
1052 struct fw_desc ucode_init_data; /* initialization data */
1053 struct fw_desc ucode_boot; /* bootstrap inst */
1054 enum ucode_type ucode_type;
1055 u8 ucode_write_complete; /* the image write is complete */
1056 char firmware_name[25];
1057
1058 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1059
1060 __le16 switch_channel;
1061
1062 /* 1st responses from initialize and runtime uCode images.
1063 * _4965's initialize alive response contains some calibration data. */
1064 struct iwl_init_alive_resp card_alive_init;
1065 struct iwl_alive_resp card_alive;
1066
1067 u16 active_rate;
1068
1069 u8 start_calib;
1070 struct iwl_sensitivity_data sensitivity_data;
1071 struct iwl_chain_noise_data chain_noise_data;
1072 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1073
1074 struct iwl_ht_config current_ht_config;
1075
1076 /* Rate scaling data */
1077 u8 retry_rate;
1078
1079 wait_queue_head_t wait_command_queue;
1080
1081 int activity_timer_active;
1082
1083 /* Rx and Tx DMA processing queues */
1084 struct iwl_rx_queue rxq;
1085 struct iwl_tx_queue *txq;
1086 unsigned long txq_ctx_active_msk;
1087 struct iwl_dma_ptr kw; /* keep warm address */
1088 struct iwl_dma_ptr scd_bc_tbls;
1089
1090 u32 scd_base_addr; /* scheduler sram base address */
1091
1092 unsigned long status;
1093
1094 /* counts mgmt, ctl, and data packets */
1095 struct traffic_stats tx_stats;
1096 struct traffic_stats rx_stats;
1097
1098 /* counts interrupts */
1099 struct isr_statistics isr_stats;
1100
1101 struct iwl_power_mgr power_data;
1102
1103 /* context information */
1104 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1105
1106 /* station table variables */
1107
1108 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1109 spinlock_t sta_lock;
1110 int num_stations;
1111 struct iwl_station_entry stations[IWL_STATION_COUNT];
1112 unsigned long ucode_key_table;
1113
1114 /* queue refcounts */
1115#define IWL_MAX_HW_QUEUES 32
1116 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1117 /* for each AC */
1118 atomic_t queue_stop_count[4];
1119
1120 /* Indication if ieee80211_ops->open has been called */
1121 u8 is_open;
1122
1123 u8 mac80211_registered;
1124
1125 /* eeprom -- this is in the card's little endian byte order */
1126 u8 *eeprom;
1127 struct iwl_eeprom_calib_info *calib_info;
1128
1129 enum nl80211_iftype iw_mode;
1130
1131 /* Last Rx'd beacon timestamp */
1132 u64 timestamp;
1133
1134 union {
1135#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
1136 struct {
1137 void *shared_virt;
1138 dma_addr_t shared_phys;
1139
1140 struct delayed_work thermal_periodic;
1141 struct delayed_work rfkill_poll;
1142
1143 struct iwl3945_notif_statistics statistics;
1144#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1145 struct iwl3945_notif_statistics accum_statistics;
1146 struct iwl3945_notif_statistics delta_statistics;
1147 struct iwl3945_notif_statistics max_delta;
1148#endif
1149
1150 u32 sta_supp_rates;
1151 int last_rx_rssi; /* From Rx packet statistics */
1152
1153 /* Rx'd packet timing information */
1154 u32 last_beacon_time;
1155 u64 last_tsf;
1156
1157 /*
1158 * each calibration channel group in the
1159 * EEPROM has a derived clip setting for
1160 * each rate.
1161 */
1162 const struct iwl3945_clip_group clip_groups[5];
1163
1164 } _3945;
1165#endif
1166#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
1167 struct {
1168 struct iwl_rx_phy_res last_phy_res;
1169 bool last_phy_res_valid;
1170
1171 struct completion firmware_loading_complete;
1172
1173 /*
1174 * chain noise reset and gain commands are the
1175			 * two extra calibration commands that follow the
1176			 * standard phy calibration commands
1177 */
1178 u8 phy_calib_chain_noise_reset_cmd;
1179 u8 phy_calib_chain_noise_gain_cmd;
1180
1181 struct iwl_notif_statistics statistics;
1182#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1183 struct iwl_notif_statistics accum_statistics;
1184 struct iwl_notif_statistics delta_statistics;
1185 struct iwl_notif_statistics max_delta;
1186#endif
1187
1188 } _4965;
1189#endif
1190 };
1191
1192 struct iwl_hw_params hw_params;
1193
1194 u32 inta_mask;
1195
1196 struct workqueue_struct *workqueue;
1197
1198 struct work_struct restart;
1199 struct work_struct scan_completed;
1200 struct work_struct rx_replenish;
1201 struct work_struct abort_scan;
1202
1203 struct iwl_rxon_context *beacon_ctx;
1204 struct sk_buff *beacon_skb;
1205
1206 struct work_struct tx_flush;
1207
1208 struct tasklet_struct irq_tasklet;
1209
1210 struct delayed_work init_alive_start;
1211 struct delayed_work alive_start;
1212 struct delayed_work scan_check;
1213
1214 /* TX Power */
1215 s8 tx_power_user_lmt;
1216 s8 tx_power_device_lmt;
1217 s8 tx_power_next;
1218
1219
1220#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1221 /* debugging info */
1222 u32 debug_level; /* per device debugging will override global
1223 iwlegacy_debug_level if set */
1224#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1225#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1226 /* debugfs */
1227 u16 tx_traffic_idx;
1228 u16 rx_traffic_idx;
1229 u8 *tx_traffic;
1230 u8 *rx_traffic;
1231 struct dentry *debugfs_dir;
1232 u32 dbgfs_sram_offset, dbgfs_sram_len;
1233 bool disable_ht40;
1234#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
1235
1236 struct work_struct txpower_work;
1237 u32 disable_sens_cal;
1238 u32 disable_chain_noise_cal;
1239 u32 disable_tx_power_cal;
1240 struct work_struct run_time_calib_work;
1241 struct timer_list statistics_periodic;
1242 struct timer_list watchdog;
1243 bool hw_ready;
1244
1245 struct led_classdev led;
1246 unsigned long blink_on, blink_off;
1247 bool led_registered;
1248}; /*iwl_priv */
1249
1250static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1251{
1252 set_bit(txq_id, &priv->txq_ctx_active_msk);
1253}
1254
1255static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1256{
1257 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1258}
1259
1260#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1261/*
1262 * iwl_legacy_get_debug_level: Return active debug level for device
1263 *
1264 * Using sysfs it is possible to set per device debug level. This debug
1265 * level will be used if set, otherwise the global debug level which can be
1266 * set via module parameter is used.
1267 */
1268static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1269{
1270 if (priv->debug_level)
1271 return priv->debug_level;
1272 else
1273 return iwlegacy_debug_level;
1274}
1275#else
1276static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1277{
1278 return iwlegacy_debug_level;
1279}
1280#endif
1281
1282
1283static inline struct ieee80211_hdr *
1284iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
1285 int txq_id, int idx)
1286{
1287 if (priv->txq[txq_id].txb[idx].skb)
1288 return (struct ieee80211_hdr *)priv->txq[txq_id].
1289 txb[idx].skb->data;
1290 return NULL;
1291}
1292
1293static inline struct iwl_rxon_context *
1294iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1295{
1296 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1297
1298 return vif_priv->ctx;
1299}
1300
1301#define for_each_context(priv, ctx) \
1302 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
1303 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
1304 if (priv->valid_contexts & BIT(ctx->ctxid))
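/*
 * Minimal usage sketch for the iterator above: only contexts whose bit is
 * set in priv->valid_contexts are visited. The helper is illustrative.
 */
static inline int iwl_example_count_active_contexts(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int n = 0;

	for_each_context(priv, ctx)
		if (ctx->is_active)
			n++;
	return n;
}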
1305
1306static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
1307 enum iwl_rxon_context_id ctxid)
1308{
1309 return (priv->contexts[ctxid].active.filter_flags &
1310 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1311}
1312
1313static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
1314{
1315 return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
1316}
1317
1318static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
1319{
1320 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1321}
1322
1323static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
1324{
1325 if (ch_info == NULL)
1326 return 0;
1327 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1328}
1329
1330static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
1331{
1332 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1333}
1334
1335static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
1336{
1337 return ch_info->band == IEEE80211_BAND_5GHZ;
1338}
1339
1340static inline int
1341iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1342{
1343 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1344}
1345
1346static inline int
1347iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
1348{
1349 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1350}
1351
1352static inline void
1353__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
1354{
1355 __free_pages(page, priv->hw_params.rx_page_order);
1356 priv->alloc_rxb_page--;
1357}
1358
1359static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
1360{
1361 free_pages(page, priv->hw_params.rx_page_order);
1362 priv->alloc_rxb_page--;
1363}
1364#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644
index 00000000000..acec99197ce
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
@@ -0,0 +1,42 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28
29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__
31#include "iwl-dev.h"
32
33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h"
35
36EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
42#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
new file mode 100644
index 00000000000..a443725ba6b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
@@ -0,0 +1,210 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_LEGACY_DEVICE_TRACE
29
30#include <linux/tracepoint.h>
31
32#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
33#undef TRACE_EVENT
34#define TRACE_EVENT(name, proto, ...) \
35static inline void trace_ ## name(proto) {}
36#endif
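/*
 * With tracing compiled out, the redefinition above turns every trace call
 * into an empty inline stub; e.g. the ioread32 event defined below reduces
 * to roughly:
 *
 *	static inline void trace_iwlwifi_legacy_dev_ioread32(
 *			struct iwl_priv *priv, u32 offs, u32 val) {}
 */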
37
38
39#define PRIV_ENTRY __field(struct iwl_priv *, priv)
40#define PRIV_ASSIGN (__entry->priv = priv)
41
42#undef TRACE_SYSTEM
43#define TRACE_SYSTEM iwlwifi_legacy_io
44
45TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
46 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
47 TP_ARGS(priv, offs, val),
48 TP_STRUCT__entry(
49 PRIV_ENTRY
50 __field(u32, offs)
51 __field(u32, val)
52 ),
53 TP_fast_assign(
54 PRIV_ASSIGN;
55 __entry->offs = offs;
56 __entry->val = val;
57 ),
58 TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
59 __entry->offs, __entry->val)
60);
61
62TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
63 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
64 TP_ARGS(priv, offs, val),
65 TP_STRUCT__entry(
66 PRIV_ENTRY
67 __field(u32, offs)
68 __field(u8, val)
69 ),
70 TP_fast_assign(
71 PRIV_ASSIGN;
72 __entry->offs = offs;
73 __entry->val = val;
74 ),
75	TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
76 __entry->offs, __entry->val)
77);
78
79TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
80 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
81 TP_ARGS(priv, offs, val),
82 TP_STRUCT__entry(
83 PRIV_ENTRY
84 __field(u32, offs)
85 __field(u32, val)
86 ),
87 TP_fast_assign(
88 PRIV_ASSIGN;
89 __entry->offs = offs;
90 __entry->val = val;
91 ),
92	TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
93 __entry->offs, __entry->val)
94);
95
96#undef TRACE_SYSTEM
97#define TRACE_SYSTEM iwlwifi_legacy_ucode
98
99#undef TRACE_SYSTEM
100#define TRACE_SYSTEM iwlwifi
101
102TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
103 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
104 TP_ARGS(priv, hcmd, len, flags),
105 TP_STRUCT__entry(
106 PRIV_ENTRY
107 __dynamic_array(u8, hcmd, len)
108 __field(u32, flags)
109 ),
110 TP_fast_assign(
111 PRIV_ASSIGN;
112 memcpy(__get_dynamic_array(hcmd), hcmd, len);
113 __entry->flags = flags;
114 ),
115 TP_printk("[%p] hcmd %#.2x (%ssync)",
116 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
117 __entry->flags & CMD_ASYNC ? "a" : "")
118);
119
120TRACE_EVENT(iwlwifi_legacy_dev_rx,
121 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
122 TP_ARGS(priv, rxbuf, len),
123 TP_STRUCT__entry(
124 PRIV_ENTRY
125 __dynamic_array(u8, rxbuf, len)
126 ),
127 TP_fast_assign(
128 PRIV_ASSIGN;
129 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
130 ),
131 TP_printk("[%p] RX cmd %#.2x",
132 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
133);
134
135TRACE_EVENT(iwlwifi_legacy_dev_tx,
136 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
137 void *buf0, size_t buf0_len,
138 void *buf1, size_t buf1_len),
139 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
140 TP_STRUCT__entry(
141 PRIV_ENTRY
142
143 __field(size_t, framelen)
144 __dynamic_array(u8, tfd, tfdlen)
145
146 /*
147 * Do not insert between or below these items,
148 * we want to keep the frame together (except
149 * for the possible padding).
150 */
151 __dynamic_array(u8, buf0, buf0_len)
152 __dynamic_array(u8, buf1, buf1_len)
153 ),
154 TP_fast_assign(
155 PRIV_ASSIGN;
156 __entry->framelen = buf0_len + buf1_len;
157 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
158 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
159 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
160 ),
161 TP_printk("[%p] TX %.2x (%zu bytes)",
162 __entry->priv,
163 ((u8 *)__get_dynamic_array(buf0))[0],
164 __entry->framelen)
165);
166
167TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
168 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
169 u32 data1, u32 data2, u32 line, u32 blink1,
170 u32 blink2, u32 ilink1, u32 ilink2),
171 TP_ARGS(priv, desc, time, data1, data2, line,
172 blink1, blink2, ilink1, ilink2),
173 TP_STRUCT__entry(
174 PRIV_ENTRY
175 __field(u32, desc)
176 __field(u32, time)
177 __field(u32, data1)
178 __field(u32, data2)
179 __field(u32, line)
180 __field(u32, blink1)
181 __field(u32, blink2)
182 __field(u32, ilink1)
183 __field(u32, ilink2)
184 ),
185 TP_fast_assign(
186 PRIV_ASSIGN;
187 __entry->desc = desc;
188 __entry->time = time;
189 __entry->data1 = data1;
190 __entry->data2 = data2;
191 __entry->line = line;
192 __entry->blink1 = blink1;
193 __entry->blink2 = blink2;
194 __entry->ilink1 = ilink1;
195 __entry->ilink2 = ilink2;
196 ),
197 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
198 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
199 __entry->priv, __entry->desc, __entry->time, __entry->data1,
200 __entry->data2, __entry->line, __entry->blink1,
201 __entry->blink2, __entry->ilink1, __entry->ilink2)
202);
203
204#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
205
206#undef TRACE_INCLUDE_PATH
207#define TRACE_INCLUDE_PATH .
208#undef TRACE_INCLUDE_FILE
209#define TRACE_INCLUDE_FILE iwl-devtrace
210#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
new file mode 100644
index 00000000000..5bf3f49b74a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -0,0 +1,553 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwlegacy_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/* 2.4 GHz */
110const u8 iwlegacy_eeprom_band_1[14] = {
111 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
112};
113
114/* 5.2 GHz bands */
115static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
116 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
117};
118
119static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
120 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
121};
122
123static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
124 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
125};
126
127static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
128 145, 149, 153, 157, 161, 165
129};
130
131static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
132 1, 2, 3, 4, 5, 6, 7
133};
134
135static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
146{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
148 int ret = 0;
149
150 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
151 switch (gp) {
152 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
153 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
154 break;
155 default:
156 IWL_ERR(priv, "bad EEPROM signature,"
157 "EEPROM_GP=0x%08x\n", gp);
158 ret = -ENOENT;
159 break;
160 }
161 return ret;
162}
163
164const u8
165*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
166{
167 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
168 return &priv->eeprom[offset];
169}
170EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
171
172u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
173{
174 if (!priv->eeprom)
175 return 0;
176 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
177}
178EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
179
180/**
181 * iwl_legacy_eeprom_init - read EEPROM contents
182 *
183 * Load the EEPROM contents from adapter into priv->eeprom
184 *
185 * NOTE: This routine uses the non-debug IO access functions.
186 */
187int iwl_legacy_eeprom_init(struct iwl_priv *priv)
188{
189 __le16 *e;
190 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
191 int sz;
192 int ret;
193 u16 addr;
194
195 /* allocate eeprom */
196 sz = priv->cfg->base_params->eeprom_size;
197 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
198 priv->eeprom = kzalloc(sz, GFP_KERNEL);
199 if (!priv->eeprom) {
200 ret = -ENOMEM;
201 goto alloc_err;
202 }
203 e = (__le16 *)priv->eeprom;
204
205 priv->cfg->ops->lib->apm_ops.init(priv);
206
207 ret = iwl_legacy_eeprom_verify_signature(priv);
208 if (ret < 0) {
209 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
210 ret = -ENOENT;
211 goto err;
212 }
213
214 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
215 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
216 if (ret < 0) {
217 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
218 ret = -ENOENT;
219 goto err;
220 }
221
222 /* eeprom is an array of 16bit values */
223 for (addr = 0; addr < sz; addr += sizeof(u16)) {
224 u32 r;
225
226 _iwl_legacy_write32(priv, CSR_EEPROM_REG,
227 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
228
229 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
230 CSR_EEPROM_REG_READ_VALID_MSK,
231 CSR_EEPROM_REG_READ_VALID_MSK,
232 IWL_EEPROM_ACCESS_TIMEOUT);
233 if (ret < 0) {
234 IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
235 addr);
236 goto done;
237 }
238 r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
239 e[addr / 2] = cpu_to_le16(r >> 16);
240 }
241
242 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
243 "EEPROM",
244 iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
245
246 ret = 0;
247done:
248 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
249
250err:
251 if (ret)
252 iwl_legacy_eeprom_free(priv);
253 /* Reset chip to save power until we load uCode during "up". */
254 iwl_legacy_apm_stop(priv);
255alloc_err:
256 return ret;
257}
258EXPORT_SYMBOL(iwl_legacy_eeprom_init);
259
260void iwl_legacy_eeprom_free(struct iwl_priv *priv)
261{
262 kfree(priv->eeprom);
263 priv->eeprom = NULL;
264}
265EXPORT_SYMBOL(iwl_legacy_eeprom_free);
266
267static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
268 int eep_band, int *eeprom_ch_count,
269 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const u8 **eeprom_ch_index)
271{
272 u32 offset = priv->cfg->ops->lib->
273 eeprom_ops.regulatory_bands[eep_band - 1];
274 switch (eep_band) {
275 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
277 *eeprom_ch_info = (struct iwl_eeprom_channel *)
278 iwl_legacy_eeprom_query_addr(priv, offset);
279 *eeprom_ch_index = iwlegacy_eeprom_band_1;
280 break;
281 case 2: /* 4.9GHz band */
282 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
283 *eeprom_ch_info = (struct iwl_eeprom_channel *)
284 iwl_legacy_eeprom_query_addr(priv, offset);
285 *eeprom_ch_index = iwlegacy_eeprom_band_2;
286 break;
287 case 3: /* 5.2GHz band */
288 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
289 *eeprom_ch_info = (struct iwl_eeprom_channel *)
290 iwl_legacy_eeprom_query_addr(priv, offset);
291 *eeprom_ch_index = iwlegacy_eeprom_band_3;
292 break;
293 case 4: /* 5.5GHz band */
294 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
295 *eeprom_ch_info = (struct iwl_eeprom_channel *)
296 iwl_legacy_eeprom_query_addr(priv, offset);
297 *eeprom_ch_index = iwlegacy_eeprom_band_4;
298 break;
299 case 5: /* 5.7GHz band */
300 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
301 *eeprom_ch_info = (struct iwl_eeprom_channel *)
302 iwl_legacy_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwlegacy_eeprom_band_5;
304 break;
305 case 6: /* 2.4GHz ht40 channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
307 *eeprom_ch_info = (struct iwl_eeprom_channel *)
308 iwl_legacy_eeprom_query_addr(priv, offset);
309 *eeprom_ch_index = iwlegacy_eeprom_band_6;
310 break;
311 case 7: /* 5 GHz ht40 channels */
312 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
313 *eeprom_ch_info = (struct iwl_eeprom_channel *)
314 iwl_legacy_eeprom_query_addr(priv, offset);
315 *eeprom_ch_index = iwlegacy_eeprom_band_7;
316 break;
317 default:
318 BUG();
319 }
320}
321
322#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
323 ? # x " " : "")
324/**
325 * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
326 *
327 * Does not set up a command, or touch hardware.
328 */
329static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
330 enum ieee80211_band band, u16 channel,
331 const struct iwl_eeprom_channel *eeprom_ch,
332 u8 clear_ht40_extension_channel)
333{
334 struct iwl_channel_info *ch_info;
335
336 ch_info = (struct iwl_channel_info *)
337 iwl_legacy_get_channel_info(priv, band, channel);
338
339 if (!iwl_legacy_is_channel_valid(ch_info))
340 return -1;
341
342 IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
343 " Ad-Hoc %ssupported\n",
344 ch_info->channel,
345 iwl_legacy_is_channel_a_band(ch_info) ?
346 "5.2" : "2.4",
347 CHECK_AND_PRINT(IBSS),
348 CHECK_AND_PRINT(ACTIVE),
349 CHECK_AND_PRINT(RADAR),
350 CHECK_AND_PRINT(WIDE),
351 CHECK_AND_PRINT(DFS),
352 eeprom_ch->flags,
353 eeprom_ch->max_power_avg,
354 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
355 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
356 "" : "not ");
357
358 ch_info->ht40_eeprom = *eeprom_ch;
359 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
360 ch_info->ht40_flags = eeprom_ch->flags;
361 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
362 ch_info->ht40_extension_channel &=
363 ~clear_ht40_extension_channel;
364
365 return 0;
366}
367
368#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
369 ? # x " " : "")
370
371/**
372 * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
373 */
374int iwl_legacy_init_channel_map(struct iwl_priv *priv)
375{
376 int eeprom_ch_count = 0;
377 const u8 *eeprom_ch_index = NULL;
378 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
379 int band, ch;
380 struct iwl_channel_info *ch_info;
381
382 if (priv->channel_count) {
383 IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
384 return 0;
385 }
386
387 IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
388
389 priv->channel_count =
390 ARRAY_SIZE(iwlegacy_eeprom_band_1) +
391 ARRAY_SIZE(iwlegacy_eeprom_band_2) +
392 ARRAY_SIZE(iwlegacy_eeprom_band_3) +
393 ARRAY_SIZE(iwlegacy_eeprom_band_4) +
394 ARRAY_SIZE(iwlegacy_eeprom_band_5);
395
396 IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
397 priv->channel_count);
398
399 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
400 priv->channel_count, GFP_KERNEL);
401 if (!priv->channel_info) {
402 IWL_ERR(priv, "Could not allocate channel_info\n");
403 priv->channel_count = 0;
404 return -ENOMEM;
405 }
406
407 ch_info = priv->channel_info;
408
409 /* Loop through the 5 EEPROM bands adding them in order to the
410	 * channel map we maintain (which contains more information than
411	 * just what is in the EEPROM) */
412 for (band = 1; band <= 5; band++) {
413
414 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
415 &eeprom_ch_info, &eeprom_ch_index);
416
417 /* Loop through each band adding each of the channels */
418 for (ch = 0; ch < eeprom_ch_count; ch++) {
419 ch_info->channel = eeprom_ch_index[ch];
420 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
421 IEEE80211_BAND_5GHZ;
422
423 /* permanently store EEPROM's channel regulatory flags
424 * and max power in channel info database. */
425 ch_info->eeprom = eeprom_ch_info[ch];
426
427 /* Copy the run-time flags so they are there even on
428 * invalid channels */
429 ch_info->flags = eeprom_ch_info[ch].flags;
430			/* First mark HT40 as not enabled, then enable
431			 * channels one by one */
432 ch_info->ht40_extension_channel =
433 IEEE80211_CHAN_NO_HT40;
434
435 if (!(iwl_legacy_is_channel_valid(ch_info))) {
436 IWL_DEBUG_EEPROM(priv,
437 "Ch. %d Flags %x [%sGHz] - "
438 "No traffic\n",
439 ch_info->channel,
440 ch_info->flags,
441 iwl_legacy_is_channel_a_band(ch_info) ?
442 "5.2" : "2.4");
443 ch_info++;
444 continue;
445 }
446
447 /* Initialize regulatory-based run-time data */
448 ch_info->max_power_avg = ch_info->curr_txpow =
449 eeprom_ch_info[ch].max_power_avg;
450 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
451 ch_info->min_power = 0;
452
453 IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
454 "%s%s%s%s%s%s(0x%02x %ddBm):"
455 " Ad-Hoc %ssupported\n",
456 ch_info->channel,
457 iwl_legacy_is_channel_a_band(ch_info) ?
458 "5.2" : "2.4",
459 CHECK_AND_PRINT_I(VALID),
460 CHECK_AND_PRINT_I(IBSS),
461 CHECK_AND_PRINT_I(ACTIVE),
462 CHECK_AND_PRINT_I(RADAR),
463 CHECK_AND_PRINT_I(WIDE),
464 CHECK_AND_PRINT_I(DFS),
465 eeprom_ch_info[ch].flags,
466 eeprom_ch_info[ch].max_power_avg,
467 ((eeprom_ch_info[ch].
468 flags & EEPROM_CHANNEL_IBSS)
469 && !(eeprom_ch_info[ch].
470 flags & EEPROM_CHANNEL_RADAR))
471 ? "" : "not ");
472
473 ch_info++;
474 }
475 }
476
477 /* Check if we do have HT40 channels */
478 if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
479 EEPROM_REGULATORY_BAND_NO_HT40 &&
480 priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
481 EEPROM_REGULATORY_BAND_NO_HT40)
482 return 0;
483
484 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
485 for (band = 6; band <= 7; band++) {
486 enum ieee80211_band ieeeband;
487
488 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
489 &eeprom_ch_info, &eeprom_ch_index);
490
491 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
492 ieeeband =
493 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
494
495 /* Loop through each band adding each of the channels */
496 for (ch = 0; ch < eeprom_ch_count; ch++) {
497 /* Set up driver's info for lower half */
498 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
499 eeprom_ch_index[ch],
500 &eeprom_ch_info[ch],
501 IEEE80211_CHAN_NO_HT40PLUS);
502
503 /* Set up driver's info for upper half */
504 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
505 eeprom_ch_index[ch] + 4,
506 &eeprom_ch_info[ch],
507 IEEE80211_CHAN_NO_HT40MINUS);
508 }
509 }
510
511 return 0;
512}
513EXPORT_SYMBOL(iwl_legacy_init_channel_map);
514
515/*
516 * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
517 */
518void iwl_legacy_free_channel_map(struct iwl_priv *priv)
519{
520 kfree(priv->channel_info);
521 priv->channel_count = 0;
522}
523EXPORT_SYMBOL(iwl_legacy_free_channel_map);
524
525/**
526 * iwl_legacy_get_channel_info - Find driver's private channel info
527 *
528 * Based on band and channel number.
529 */
530const struct
531iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
532 enum ieee80211_band band, u16 channel)
533{
534 int i;
535
536 switch (band) {
537 case IEEE80211_BAND_5GHZ:
538 for (i = 14; i < priv->channel_count; i++) {
539 if (priv->channel_info[i].channel == channel)
540 return &priv->channel_info[i];
541 }
542 break;
543 case IEEE80211_BAND_2GHZ:
544 if (channel >= 1 && channel <= 14)
545 return &priv->channel_info[channel - 1];
546 break;
547 default:
548 BUG();
549 }
550
551 return NULL;
552}
553EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
new file mode 100644
index 00000000000..c59c8100202
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
@@ -0,0 +1,344 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_eeprom_h__
64#define __iwl_legacy_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
111
112/* SKU Capabilities */
113/* 3945 only */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __packed;
123
124/* 3945 Specific */
125#define EEPROM_3945_EEPROM_VERSION (0x2f)
126
127/* 4965 has two radio transmitters (and 3 radio receivers) */
128#define EEPROM_TX_POWER_TX_CHAINS (2)
129
130/* 4965 has room for up to 8 sets of txpower calibration data */
131#define EEPROM_TX_POWER_BANDS (8)
132
133/* 4965 factory calibration measures txpower gain settings for
134 * each of 3 target output levels */
135#define EEPROM_TX_POWER_MEASUREMENTS (3)
136
137/* 4965 Specific */
138/* 4965 driver does not work with txpower calibration version < 5 */
139#define EEPROM_4965_TX_POWER_VERSION (5)
140#define EEPROM_4965_EEPROM_VERSION (0x2f)
141#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
142#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
143#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
144#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
145
146/* 2.4 GHz */
147extern const u8 iwlegacy_eeprom_band_1[14];
148
149/*
150 * factory calibration data for one txpower level, on one channel,
151 * measured on one of the 2 tx chains (radio transmitter and associated
152 * antenna). EEPROM contains:
153 *
154 * 1) Temperature (degrees Celsius) of device when measurement was made.
155 *
156 * 2) Gain table index used to achieve the target measurement power.
157 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
158 *
159 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
160 *
161 * 4) RF power amplifier detector level measurement (not used).
162 */
163struct iwl_eeprom_calib_measure {
164 u8 temperature; /* Device temperature (Celsius) */
165 u8 gain_idx; /* Index into gain table */
166 u8 actual_pow; /* Measured RF output power, half-dBm */
167 s8 pa_det; /* Power amp detector level (not used) */
168} __packed;
169
170
171/*
172 * measurement set for one channel. EEPROM contains:
173 *
174 * 1) Channel number measured
175 *
176 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
177 * (a.k.a. "tx chains") (6 measurements altogether)
178 */
179struct iwl_eeprom_calib_ch_info {
180 u8 ch_num;
181 struct iwl_eeprom_calib_measure
182 measurements[EEPROM_TX_POWER_TX_CHAINS]
183 [EEPROM_TX_POWER_MEASUREMENTS];
184} __packed;
185
186/*
187 * txpower subband info.
188 *
189 * For each frequency subband, EEPROM contains the following:
190 *
191 * 1) First and last channels within range of the subband. "0" values
192 * indicate that this sample set is not being used.
193 *
194 * 2) Sample measurement sets for 2 channels close to the range endpoints.
195 */
196struct iwl_eeprom_calib_subband_info {
197 u8 ch_from; /* channel number of lowest channel in subband */
198 u8 ch_to; /* channel number of highest channel in subband */
199 struct iwl_eeprom_calib_ch_info ch1;
200 struct iwl_eeprom_calib_ch_info ch2;
201} __packed;
202
203
204/*
205 * txpower calibration info. EEPROM contains:
206 *
207 * 1) Factory-measured saturation power levels (maximum levels at which
208 * tx power amplifier can output a signal without too much distortion).
209 * There is one level for 2.4 GHz band and one for 5 GHz band. These
210 * values apply to all channels within each of the bands.
211 *
212 * 2) Factory-measured power supply voltage level. This is assumed to be
213 * constant (i.e. same value applies to all channels/bands) while the
214 * factory measurements are being made.
215 *
216 * 3) Up to 8 sets of factory-measured txpower calibration values.
217 * These are for different frequency ranges, since txpower gain
218 * characteristics of the analog radio circuitry vary with frequency.
219 *
220 * Not all sets need to be filled with data;
221 * struct iwl_eeprom_calib_subband_info contains range of channels
222 * (0 if unused) for each set of data.
223 */
224struct iwl_eeprom_calib_info {
225 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
226 u8 saturation_power52; /* half-dBm */
227 __le16 voltage; /* signed */
228 struct iwl_eeprom_calib_subband_info
229 band_info[EEPROM_TX_POWER_BANDS];
230} __packed;
231
232
233/* General */
234#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
235#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
236#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
237#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
238#define EEPROM_VERSION (2*0x44) /* 2 bytes */
239#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
240#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
241#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
242#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
243#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
244
245/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
246#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
247#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
248#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
249#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
250#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
251#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
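/*
 * Sketch of applying the masks above to the radio configuration word,
 * assuming the iwl_legacy_eeprom_query16() accessor declared later in
 * this header; the helper itself is illustrative.
 */
static inline void iwl_example_parse_radio_cfg(const struct iwl_priv *priv)
{
	u16 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
	u8 rf_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);		/* bits 0-1 */
	u8 tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);	/* bits 8-11 */
	u8 rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);	/* bits 12-15 */

	(void)rf_type; (void)tx_ant; (void)rx_ant;
}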
252
253#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
254#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
255
256/*
257 * Per-channel regulatory data.
258 *
259 * Each channel that *might* be supported by iwl has a fixed location
260 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
261 * txpower (MSB).
262 *
263 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
264 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
265 *
266 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
267 */
268#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
269#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
270#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
271
272/*
273 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
274 * 5.0 GHz channels 7, 8, 11, 12, 16
275 * (4915-5080MHz) (none of these is ever supported)
276 */
277#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
278#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
279
280/*
281 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
282 * (5170-5320MHz)
283 */
284#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
285#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
286
287/*
288 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
289 * (5500-5700MHz)
290 */
291#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
292#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
293
294/*
295 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
296 * (5725-5825MHz)
297 */
298#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
299#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
300
301/*
302 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
303 *
304 * The channel listed is the center of the lower 20 MHz half of the channel.
305 * The overall center frequency is actually 2 channels (10 MHz) above that,
306 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
307 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
308 * and the overall HT40 channel width centers on channel 3.
309 *
310 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
311 * control channel to which to tune. RXON also specifies whether the
312 * control channel is the upper or lower half of a HT40 channel.
313 *
314 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
315 */
316#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
317
318/*
319 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
320 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
321 */
322#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
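/*
 * Sketch of the HT40 pairing arithmetic described above: the EEPROM lists
 * the lower 20 MHz half; the upper half is 4 channels (20 MHz) higher and
 * the 40 MHz center sits 2 channels (10 MHz) above the lower half. The
 * helpers are illustrative.
 */
static inline u8 iwl_example_ht40_upper_half(u8 lower_chan)
{
	return lower_chan + 4;		/* e.g. 36 -> 40, 149 -> 153 */
}

static inline u8 iwl_example_ht40_center(u8 lower_chan)
{
	return lower_chan + 2;		/* e.g. HT40 channel 1 centers on 3 */
}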
323
324#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
325
326struct iwl_eeprom_ops {
327 const u32 regulatory_bands[7];
328 int (*acquire_semaphore) (struct iwl_priv *priv);
329 void (*release_semaphore) (struct iwl_priv *priv);
330};
331
332
333int iwl_legacy_eeprom_init(struct iwl_priv *priv);
334void iwl_legacy_eeprom_free(struct iwl_priv *priv);
335const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
336 size_t offset);
337u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
338int iwl_legacy_init_channel_map(struct iwl_priv *priv);
339void iwl_legacy_free_channel_map(struct iwl_priv *priv);
340const struct iwl_channel_info *iwl_legacy_get_channel_info(
341 const struct iwl_priv *priv,
342 enum ieee80211_band band, u16 channel);
343
344#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
new file mode 100644
index 00000000000..6e6091816e3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-fh.h
@@ -0,0 +1,513 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_fh_h__
64#define __iwl_legacy_fh_h__
65
66/****************************/
67/* Flow Handler Definitions */
68/****************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH_MEM_LOWER_BOUND (0x1000)
75#define FH_MEM_UPPER_BOUND (0x2000)
76
77/**
78 * Keep-Warm (KW) buffer base address.
79 *
80 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
82 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
83 * from going into a power-savings mode that would cause higher DRAM latency,
84 * and possible data over/under-runs, before all Tx/Rx is complete.
85 *
86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
87 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
88 * automatically invokes keep-warm accesses when normal accesses might not
89 * be sufficient to maintain fast DRAM response.
90 *
91 * Bit fields:
92 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
93 */
94#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
95
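/*
 * A minimal illustrative sketch (assumptions: iwl_legacy_write_direct32()
 * from iwl-io.h, a 4K-aligned DMA buffer already allocated by the caller,
 * and the hypothetical iwl_example_* name): programming the keep-warm
 * buffer address described above.
 */
static inline void iwl_example_kw_init(struct iwl_priv *priv, dma_addr_t kw_dma)
{
	/* the register takes physical address bits [35:4] of the 4K buffer */
	iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, (u32)(kw_dma >> 4));
}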
96
97/**
98 * TFD Circular Buffers Base (CBBC) addresses
99 *
100 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
104 * aligned (address bits 0-7 must be 0).
105 *
106 * Bit fields in each pointer register:
107 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
108 */
109#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
110#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
111
112/* Find TFD CB base pointer for given queue (range 0-15). */
113#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
114
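/*
 * A minimal illustrative sketch of using FH_MEM_CBBC_QUEUE(): pointing one
 * Tx queue's base register at its 256-byte-aligned TFD circular buffer.
 * iwl_legacy_write_direct32() comes from iwl-io.h; the iwl_example_* name
 * and the tfd_cb_dma argument are hypothetical.
 */
static inline void iwl_example_tx_queue_set_base(struct iwl_priv *priv,
						 int txq_id, dma_addr_t tfd_cb_dma)
{
	/* write physical address bits [35:8]; the queue id selects the register */
	iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
				  tfd_cb_dma >> 8);
}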
115
116/**
117 * Rx SRAM Control and Status Registers (RSCSR)
118 *
119 * These registers provide handshake between driver and 4965 for the Rx queue
120 * (this queue handles *all* command responses, notifications, Rx data, etc.
121 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
125 * mapping between RBDs and RBs.
126 *
127 * Driver must allocate host DRAM memory for the following, and set the
128 * physical address of each into 4965 registers:
129 *
130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
131 * entries (although any power of 2, up to 4096, is selectable by driver).
132 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
133 * (typically 4K, although 8K or 16K are also selectable by driver).
134 * Driver sets up RB size and number of RBDs in the CB via Rx config
135 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
136 *
137 * Bit fields within one RBD:
138 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
139 *
140 * Driver sets physical address [35:8] of base of RBD circular buffer
141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
142 *
143 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
144 * (RBs) have been filled, via a "write pointer", actually the index of
145 * the RB's corresponding RBD within the circular buffer. Driver sets
146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
147 *
148 * Bit fields in lower dword of Rx status buffer (upper dword not used
149 * by driver; see struct iwl4965_shared, val0):
150 * 31-12: Not used by driver
151 * 11- 0: Index of last filled Rx buffer descriptor
152 * (4965 writes, driver reads this value)
153 *
154 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
155 * enter pointers to these RBs into contiguous RBD circular buffer entries,
156 * and update the 4965's "write" index register,
157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
158 *
159 * This "write" index corresponds to the *next* RBD that the driver will make
160 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
161 * the circular buffer. This value should initially be 0 (before preparing any
162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
163 * wrap back to 0 at the end of the circular buffer (but don't wrap before
164 * "read" index has advanced past 1! See below).
165 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
166 *
167 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
169 * to tell the driver the index of the latest filled RBD. The driver must
170 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
171 *
172 * The driver must also internally keep track of a third index, which is the
173 * next RBD to process. When receiving an Rx interrupt, driver should process
174 * all filled but unprocessed RBs up to, but not including, the RB
175 * corresponding to the "read" index. For example, if "read" index becomes "1",
176 * driver may process the RB pointed to by RBD 0. Depending on volume of
177 * traffic, there may be many RBs to process.
178 *
179 * If read index == write index, 4965 thinks there is no room to put new data.
180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
181 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
182 * and "read" indexes; that is, make sure that there are no more than 254
183 * buffers waiting to be filled.
184 */
185#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
186#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
187#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
188
189/**
190 * Physical base address of 8-byte Rx Status buffer.
191 * Bit fields:
 192 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
193 */
194#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
195
196/**
197 * Physical base address of Rx Buffer Descriptor Circular Buffer.
198 * Bit fields:
 199 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
200 */
201#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
202
203/**
204 * Rx write pointer (index, really!).
205 * Bit fields:
206 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
207 * NOTE: For 256-entry circular buffer, use only bits [7:0].
208 */
209#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
210#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
211
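/*
 * A minimal illustrative sketch of publishing prepared RBDs to the device,
 * honoring the "increment in multiples of 8" rule described above. The
 * rxq_write argument stands in for the driver's own write-index bookkeeping;
 * iwl_legacy_write_direct32() comes from iwl-io.h.
 */
static inline void iwl_example_rx_update_wptr(struct iwl_priv *priv,
					      u32 rxq_write)
{
	/* round down to a multiple of 8 before handing the index to the HW */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
				  rxq_write & ~0x7);
}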
212
213/**
214 * Rx Config/Status Registers (RCSR)
215 * Rx Config Reg for channel 0 (only channel used)
216 *
217 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
218 * normal operation (see bit fields).
219 *
220 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
221 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
222 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
223 *
224 * Bit fields:
225 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
226 * '10' operate normally
227 * 29-24: reserved
228 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
229 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
230 * 19-18: reserved
231 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
232 * '10' 12K, '11' 16K.
233 * 15-14: reserved
234 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
235 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
236 * typical value 0x10 (about 1/2 msec)
237 * 3- 0: reserved
238 */
239#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
240#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
241#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
242
243#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
244
245#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
 246#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK	(0x00001000) /* bit 12 */
247#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
248#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
249#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
250#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
251
252#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
253#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
254#define RX_RB_TIMEOUT (0x10)
255
256#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
257#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
258#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
259
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
262#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
263#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
264
265#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
268
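/*
 * A minimal illustrative sketch composing FH_MEM_RCSR_CHNL0_CONFIG_REG for
 * normal operation per the bit-field description above: DMA enabled, 4K RBs,
 * 256 RBDs (2^8), interrupts routed to the host driver, default RB timeout.
 * iwl_legacy_write_direct32() is from iwl-io.h; the iwl_example_* name is
 * hypothetical.
 */
static inline void iwl_example_rx_config(struct iwl_priv *priv)
{
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
		(8 << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) | /* 2^8 = 256 RBDs */
		(RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));
}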
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
271/**
272 * Rx Shared Status Registers (RSSR)
273 *
274 * After stopping Rx DMA channel (writing 0 to
275 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
276 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
277 *
278 * Bit fields:
279 * 24: 1 = Channel 0 is idle
280 *
281 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
282 * contain default values that should not be altered by the driver.
283 */
284#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
285#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
286
287#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
288#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
289#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
290 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
291
292#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
293
294#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
295
296/* TFDB Area - TFDs buffer table */
297#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
298#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
299#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
300#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
301#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
302
303/**
304 * Transmit DMA Channel Control/Status Registers (TCSR)
305 *
306 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
309 *
310 * To use a Tx DMA channel, driver must initialize its
311 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
312 *
313 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
 314 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
315 *
316 * All other bits should be 0.
317 *
318 * Bit fields:
319 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
320 * '10' operate normally
321 * 29- 4: Reserved, set to "0"
322 * 3: Enable internal DMA requests (1, normal operation), disable (0)
323 * 2- 0: Reserved, set to "0"
324 */
325#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
327
328/* Find Control/Status reg for given Tx DMA/FIFO channel */
329#define FH49_TCSR_CHNL_NUM (7)
330#define FH50_TCSR_CHNL_NUM (8)
331
332/* TCSR: tx_config register values */
333#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
334 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
335#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
336 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
337#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
338 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
339
340#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
341#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
342
343#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
344#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
345
346#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
347#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
348#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
349
350#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
351#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
352#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
353
354#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
355#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
356#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
357
358#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
359#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
360#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
361
362#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
363#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
364
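/*
 * A minimal illustrative sketch of bringing up one Tx DMA channel as the
 * TCSR description above prescribes (DMA enable plus DMA credit enable,
 * all other bits zero). iwl_legacy_write_direct32() is from iwl-io.h; the
 * iwl_example_* name is hypothetical.
 */
static inline void iwl_example_tx_chnl_enable(struct iwl_priv *priv, int chnl)
{
	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
}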
365/**
366 * Tx Shared Status Registers (TSSR)
367 *
368 * After stopping Tx DMA channel (writing 0 to
369 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
370 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
371 * (channel's buffers empty | no pending requests).
372 *
373 * Bit fields:
374 * 31-24: 1 = Channel buffers empty (channel 7:0)
375 * 23-16: 1 = No pending requests (channel 7:0)
376 */
377#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
378#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
379
380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
381
382/**
383 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
384 * 31: Indicates an address error when accessed to internal memory
385 * uCode/driver must write "1" in order to clear this flag
386 * 30: Indicates that Host did not send the expected number of dwords to FH
387 * uCode/driver must write "1" in order to clear this flag
388 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
389 * command was received from the scheduler while the TRB was already full
390 * with previous command
391 * uCode/driver must write "1" in order to clear this flag
392 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
393 * bit is set, it indicates that the FH has received a full indication
394 * from the RTC TxFIFO and the current value of the TxCredit counter was
395 * not equal to zero. This mean that the credit mechanism was not
396 * synchronized to the TxFIFO status
397 * uCode/driver must write "1" in order to clear this flag
398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400
401#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
402
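/*
 * A minimal illustrative sketch of the stop sequence described above: clear
 * the channel's config register, then poll FH_TSSR_TX_STATUS_REG until the
 * channel reports idle. iwl_legacy_write_direct32() and iwl_poll_direct_bit()
 * are from iwl-io.h; the 1000 usec timeout is an assumption for the example.
 */
static inline int iwl_example_tx_chnl_stop(struct iwl_priv *priv, int chnl)
{
	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);

	return iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
}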
403/* Tx service channels */
404#define FH_SRVC_CHNL (9)
405#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
406#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
407#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
408 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
409
410#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
411/* Instruct FH to increment the retry count of a packet when
412 * it is brought from the memory to TX-FIFO
413 */
414#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
415
416#define RX_QUEUE_SIZE 256
417#define RX_QUEUE_MASK 255
418#define RX_QUEUE_SIZE_LOG 8
419
420/*
421 * RX related structures and functions
422 */
423#define RX_FREE_BUFFERS 64
424#define RX_LOW_WATERMARK 8
425
426/* Size of one Rx buffer in host DRAM */
427#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
428#define IWL_RX_BUF_SIZE_4K (4 * 1024)
429#define IWL_RX_BUF_SIZE_8K (8 * 1024)
430
431/**
 432 * struct iwl_rb_status - receive buffer status
433 * host memory mapped FH registers
434 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
435 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
436 * @finished_rb_num [0:11] - Indicates the index of the current RB
437 * in which the last frame was written to
438 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
439 * which was transferred
440 */
441struct iwl_rb_status {
442 __le16 closed_rb_num;
443 __le16 closed_fr_num;
444 __le16 finished_rb_num;
 445	__le16 finished_fr_num;
446 __le32 __unused; /* 3945 only */
447} __packed;
448
449
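/*
 * A minimal illustrative sketch of consuming the status buffer above: the
 * device writes closed_rb_num little-endian, and only bits [11:0] carry the
 * index of the last filled RBD. The iwl_example_* name is hypothetical.
 */
static inline u16 iwl_example_rx_read_index(struct iwl_rb_status *rb_stts)
{
	return le16_to_cpu(rb_stts->closed_rb_num) & 0x0FFF;
}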
450#define TFD_QUEUE_SIZE_MAX (256)
451#define TFD_QUEUE_SIZE_BC_DUP (64)
452#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
453#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
454#define IWL_NUM_OF_TBS 20
455
456static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
457{
458 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
459}
460/**
 461 * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
 462 *
 463 * This structure contains the DMA address and length of one Tx buffer.
 464 *
 465 * @lo: low [31:0] portion of the DMA address of the TX buffer;
 466 *	every even-numbered entry is unaligned on a 16-bit boundary
 467 * @hi_n_len: 0-3 [35:32] portion of the DMA address
 468 *	      4-15 length of the tx buffer
469 */
470struct iwl_tfd_tb {
471 __le32 lo;
472 __le16 hi_n_len;
473} __packed;
474
475/**
476 * struct iwl_tfd
477 *
478 * Transmit Frame Descriptor (TFD)
479 *
480 * @ __reserved1[3] reserved
481 * @ num_tbs 0-4 number of active tbs
482 * 5 reserved
483 * 6-7 padding (not used)
484 * @ tbs[20] transmit frame buffer descriptors
485 * @ __pad padding
486 *
487 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
488 * Both driver and device share these circular buffers, each of which must be
489 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
490 *
491 * Driver must indicate the physical address of the base of each
492 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
493 *
494 * Each TFD contains pointer/size information for up to 20 data buffers
495 * in host DRAM. These buffers collectively contain the (one) frame described
496 * by the TFD. Each buffer must be a single contiguous block of memory within
497 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
 498 * of (4K - 4). The 4965 concatenates all of a TFD's buffers into a single
499 * Tx frame, up to 8 KBytes in size.
500 *
501 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
502 */
503struct iwl_tfd {
504 u8 __reserved1[3];
505 u8 num_tbs;
506 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
507 __le32 __pad;
508} __packed;
509
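/*
 * A minimal illustrative sketch of appending one buffer to a TFD using the
 * layout described above: the low 32 address bits go into @lo, while bits
 * [35:32] and the length are packed into @hi_n_len with the help of
 * iwl_legacy_get_dma_hi_addr(). put_unaligned_le32() needs <asm/unaligned.h>;
 * the iwl_example_* name is hypothetical.
 */
static inline void iwl_example_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
					  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	/* bits [3:0] carry address bits [35:32], bits [15:4] the length */
	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_legacy_get_dma_hi_addr(addr);
	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}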
510/* Keep Warm Size */
511#define IWL_KW_SIZE 0x1000 /* 4k */
512
513#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
new file mode 100644
index 00000000000..ce1fc9feb61
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -0,0 +1,271 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <net/mac80211.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-core.h"
38
39
40const char *iwl_legacy_get_cmd_string(u8 cmd)
41{
42 switch (cmd) {
43 IWL_CMD(REPLY_ALIVE);
44 IWL_CMD(REPLY_ERROR);
45 IWL_CMD(REPLY_RXON);
46 IWL_CMD(REPLY_RXON_ASSOC);
47 IWL_CMD(REPLY_QOS_PARAM);
48 IWL_CMD(REPLY_RXON_TIMING);
49 IWL_CMD(REPLY_ADD_STA);
50 IWL_CMD(REPLY_REMOVE_STA);
51 IWL_CMD(REPLY_WEPKEY);
52 IWL_CMD(REPLY_3945_RX);
53 IWL_CMD(REPLY_TX);
54 IWL_CMD(REPLY_RATE_SCALE);
55 IWL_CMD(REPLY_LEDS_CMD);
56 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
57 IWL_CMD(REPLY_CHANNEL_SWITCH);
58 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
59 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
60 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
61 IWL_CMD(POWER_TABLE_CMD);
62 IWL_CMD(PM_SLEEP_NOTIFICATION);
63 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
64 IWL_CMD(REPLY_SCAN_CMD);
65 IWL_CMD(REPLY_SCAN_ABORT_CMD);
66 IWL_CMD(SCAN_START_NOTIFICATION);
67 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
68 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
69 IWL_CMD(BEACON_NOTIFICATION);
70 IWL_CMD(REPLY_TX_BEACON);
71 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
72 IWL_CMD(REPLY_BT_CONFIG);
73 IWL_CMD(REPLY_STATISTICS_CMD);
74 IWL_CMD(STATISTICS_NOTIFICATION);
75 IWL_CMD(CARD_STATE_NOTIFICATION);
76 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
77 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
78 IWL_CMD(SENSITIVITY_CMD);
79 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
80 IWL_CMD(REPLY_RX_PHY_CMD);
81 IWL_CMD(REPLY_RX_MPDU_CMD);
82 IWL_CMD(REPLY_RX);
83 IWL_CMD(REPLY_COMPRESSED_BA);
84 default:
85 return "UNKNOWN";
86
87 }
88}
89EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
90
91#define HOST_COMPLETE_TIMEOUT (HZ / 2)
92
93static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
94 struct iwl_device_cmd *cmd,
95 struct iwl_rx_packet *pkt)
96{
97 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
98 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
99 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
100 return;
101 }
102
103#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
104 switch (cmd->hdr.cmd) {
105 case REPLY_TX_LINK_QUALITY_CMD:
106 case SENSITIVITY_CMD:
107 IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
108 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
109 break;
110 default:
111 IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
112 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
113 }
114#endif
115}
116
117static int
118iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
119{
120 int ret;
121
122 BUG_ON(!(cmd->flags & CMD_ASYNC));
123
124 /* An asynchronous command can not expect an SKB to be set. */
125 BUG_ON(cmd->flags & CMD_WANT_SKB);
126
127 /* Assign a generic callback if one is not provided */
128 if (!cmd->callback)
129 cmd->callback = iwl_legacy_generic_cmd_callback;
130
131 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
132 return -EBUSY;
133
134 ret = iwl_legacy_enqueue_hcmd(priv, cmd);
135 if (ret < 0) {
136 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
137 iwl_legacy_get_cmd_string(cmd->id), ret);
138 return ret;
139 }
140 return 0;
141}
142
143int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
144{
145 int cmd_idx;
146 int ret;
147
148 lockdep_assert_held(&priv->mutex);
149
150 BUG_ON(cmd->flags & CMD_ASYNC);
151
152 /* A synchronous command can not have a callback set. */
153 BUG_ON(cmd->callback);
154
155 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
156 iwl_legacy_get_cmd_string(cmd->id));
157
158 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
159 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
160 iwl_legacy_get_cmd_string(cmd->id));
161
162 cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
163 if (cmd_idx < 0) {
164 ret = cmd_idx;
165 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
166 iwl_legacy_get_cmd_string(cmd->id), ret);
167 goto out;
168 }
169
170 ret = wait_event_timeout(priv->wait_command_queue,
171 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
172 HOST_COMPLETE_TIMEOUT);
173 if (!ret) {
174 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
175 IWL_ERR(priv,
176 "Error sending %s: time out after %dms.\n",
177 iwl_legacy_get_cmd_string(cmd->id),
178 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
179
180 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
181 IWL_DEBUG_INFO(priv,
182 "Clearing HCMD_ACTIVE for command %s\n",
183 iwl_legacy_get_cmd_string(cmd->id));
184 ret = -ETIMEDOUT;
185 goto cancel;
186 }
187 }
188
189 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
190 IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
191 iwl_legacy_get_cmd_string(cmd->id));
192 ret = -ECANCELED;
193 goto fail;
194 }
195 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
196 IWL_ERR(priv, "Command %s failed: FW Error\n",
197 iwl_legacy_get_cmd_string(cmd->id));
198 ret = -EIO;
199 goto fail;
200 }
201 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
202 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
203 iwl_legacy_get_cmd_string(cmd->id));
204 ret = -EIO;
205 goto cancel;
206 }
207
208 ret = 0;
209 goto out;
210
211cancel:
212 if (cmd->flags & CMD_WANT_SKB) {
213 /*
214 * Cancel the CMD_WANT_SKB flag for the cmd in the
215 * TX cmd queue. Otherwise in case the cmd comes
216 * in later, it will possibly set an invalid
217 * address (cmd->meta.source).
218 */
219 priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
220 ~CMD_WANT_SKB;
221 }
222fail:
223 if (cmd->reply_page) {
224 iwl_legacy_free_pages(priv, cmd->reply_page);
225 cmd->reply_page = 0;
226 }
227out:
228 return ret;
229}
230EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
231
232int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
233{
234 if (cmd->flags & CMD_ASYNC)
235 return iwl_legacy_send_cmd_async(priv, cmd);
236
237 return iwl_legacy_send_cmd_sync(priv, cmd);
238}
239EXPORT_SYMBOL(iwl_legacy_send_cmd);
240
241int
242iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
243{
244 struct iwl_host_cmd cmd = {
245 .id = id,
246 .len = len,
247 .data = data,
248 };
249
250 return iwl_legacy_send_cmd_sync(priv, &cmd);
251}
252EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
253
254int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
255 u8 id, u16 len, const void *data,
256 void (*callback)(struct iwl_priv *priv,
257 struct iwl_device_cmd *cmd,
258 struct iwl_rx_packet *pkt))
259{
260 struct iwl_host_cmd cmd = {
261 .id = id,
262 .len = len,
263 .data = data,
264 };
265
266 cmd.flags |= CMD_ASYNC;
267 cmd.callback = callback;
268
269 return iwl_legacy_send_cmd_async(priv, &cmd);
270}
271EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
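/*
 * A minimal illustrative sketch of a caller of the helpers above. The payload
 * here is a placeholder byte array; real callers pass one of the command
 * structures declared in iwl-commands.h (REPLY_BT_CONFIG is a real command
 * id). The synchronous path requires priv->mutex to be held, as asserted in
 * iwl_legacy_send_cmd_sync(); the iwl_example_* name is hypothetical.
 */
static int __maybe_unused iwl_example_send_config(struct iwl_priv *priv)
{
	u8 payload[8] = { 0 };	/* placeholder command body */

	return iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
				       sizeof(payload), payload);
}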
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
new file mode 100644
index 00000000000..5cf23eaecbb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h
@@ -0,0 +1,196 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_legacy_helpers_h__
31#define __iwl_legacy_helpers_h__
32
33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37
38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
39
40
41static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
42 struct ieee80211_hw *hw)
43{
44 return &hw->conf;
45}
46
47/**
48 * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
49 * @index -- current index
50 * @n_bd -- total number of entries in queue (must be power of 2)
51 */
52static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
53{
54 return ++index & (n_bd - 1);
55}
56
57/**
58 * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
59 * @index -- current index
60 * @n_bd -- total number of entries in queue (must be power of 2)
61 */
62static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
63{
64 return --index & (n_bd - 1);
65}
66
67/* TODO: Move fw_desc functions to iwl-pci.ko */
68static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
69 struct fw_desc *desc)
70{
71 if (desc->v_addr)
72 dma_free_coherent(&pci_dev->dev, desc->len,
73 desc->v_addr, desc->p_addr);
74 desc->v_addr = NULL;
75 desc->len = 0;
76}
77
78static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
79 struct fw_desc *desc)
80{
81 if (!desc->len) {
82 desc->v_addr = NULL;
83 return -EINVAL;
84 }
85
86 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
87 &desc->p_addr, GFP_KERNEL);
88 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
89}
90
91/*
92 * we have 8 bits used like this:
93 *
94 * 7 6 5 4 3 2 1 0
95 * | | | | | | | |
96 * | | | | | | +-+-------- AC queue (0-3)
97 * | | | | | |
98 * | +-+-+-+-+------------ HW queue ID
99 * |
100 * +---------------------- unused
101 */
102static inline void
103iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
104{
105 BUG_ON(ac > 3); /* only have 2 bits */
106 BUG_ON(hwq > 31); /* only use 5 bits */
107
108 txq->swq_id = (hwq << 2) | ac;
109}
110
111static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
112 struct iwl_tx_queue *txq)
113{
114 u8 queue = txq->swq_id;
115 u8 ac = queue & 3;
116 u8 hwq = (queue >> 2) & 0x1f;
117
118 if (test_and_clear_bit(hwq, priv->queue_stopped))
119 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
120 ieee80211_wake_queue(priv->hw, ac);
121}
122
123static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
124 struct iwl_tx_queue *txq)
125{
126 u8 queue = txq->swq_id;
127 u8 ac = queue & 3;
128 u8 hwq = (queue >> 2) & 0x1f;
129
130 if (!test_and_set_bit(hwq, priv->queue_stopped))
131 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
132 ieee80211_stop_queue(priv->hw, ac);
133}
134
135#ifdef ieee80211_stop_queue
136#undef ieee80211_stop_queue
137#endif
138
139#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
140
141#ifdef ieee80211_wake_queue
142#undef ieee80211_wake_queue
143#endif
144
145#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
146
147static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
148{
149 clear_bit(STATUS_INT_ENABLED, &priv->status);
150
151 /* disable interrupts from uCode/NIC to host */
152 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
153
154 /* acknowledge/clear/reset any interrupts still pending
155 * from uCode or flow handler (Rx/Tx DMA) */
156 iwl_write32(priv, CSR_INT, 0xffffffff);
157 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
158 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
159}
160
161static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
162{
163 IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
164 iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
165}
166
167static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
168{
169 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
170 set_bit(STATUS_INT_ENABLED, &priv->status);
171 iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
172}
173
174/**
 175 * iwl_legacy_beacon_time_mask_low - mask of lower 32 bits of beacon time
 176 * @priv -- pointer to iwl_priv data structure
 177 * @tsf_bits -- number of bits needed to shift for masking
178 */
179static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
180 u16 tsf_bits)
181{
182 return (1 << tsf_bits) - 1;
183}
184
185/**
 186 * iwl_legacy_beacon_time_mask_high - mask of higher 32 bits of beacon time
 187 * @priv -- pointer to iwl_priv data structure
 188 * @tsf_bits -- number of bits needed to shift for masking
189 */
190static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
191 u16 tsf_bits)
192{
193 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
194}
195
196#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
new file mode 100644
index 00000000000..5cc5d342914
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-io.h
@@ -0,0 +1,545 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_io_h__
30#define __iwl_legacy_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-devtrace.h"
37
38/*
39 * IO, register, and NIC memory access functions
40 *
41 * NOTE on naming convention and macro usage for these
42 *
 43 * A single _ prefix before an access function means that no state
44 * check or debug information is printed when that function is called.
45 *
46 * A double __ prefix before an access function means that state is checked
47 * and the current line number and caller function name are printed in addition
48 * to any other debug output.
49 *
50 * The non-prefixed name is the #define that maps the caller into a
51 * #define that provides the caller's name and __LINE__ to the double
52 * prefix version.
53 *
54 * If you wish to call the function without any debug or state checking,
55 * you should use the single _ prefix version (as is used by dependent IO
56 * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
57 * _iwl_legacy_read32.)
58 *
59 * These declarations are *extremely* useful in quickly isolating code deltas
60 * which result in misconfiguration of the hardware I/O. In combination with
61 * git-bisect and the IO debug level you can quickly determine the specific
62 * commit which breaks the IO sequence to the hardware.
63 *
64 */
65
66static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
67{
68 trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
69 iowrite8(val, priv->hw_base + ofs);
70}
71
72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
73static inline void
74__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
75 u32 ofs, u8 val)
76{
77 IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
78 _iwl_legacy_write8(priv, ofs, val);
79}
80#define iwl_write8(priv, ofs, val) \
81 __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
82#else
83#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
84#endif
85
86
87static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
88{
89 trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
90 iowrite32(val, priv->hw_base + ofs);
91}
92
93#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
94static inline void
95__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
96 u32 ofs, u32 val)
97{
98 IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
99 _iwl_legacy_write32(priv, ofs, val);
100}
101#define iwl_write32(priv, ofs, val) \
102 __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
103#else
104#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
105#endif
106
107static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
108{
109 u32 val = ioread32(priv->hw_base + ofs);
110 trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
111 return val;
112}
113
114#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
115static inline u32
116__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
117{
 118	IWL_DEBUG_IO(priv, "read32(0x%08X) - %s %d\n", ofs, f, l);
119 return _iwl_legacy_read32(priv, ofs);
120}
121#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
122#else
123#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
124#endif
125
126#define IWL_POLL_INTERVAL 10 /* microseconds */
127static inline int
128_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
129 u32 bits, u32 mask, int timeout)
130{
131 int t = 0;
132
133 do {
134 if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
135 return t;
136 udelay(IWL_POLL_INTERVAL);
137 t += IWL_POLL_INTERVAL;
138 } while (t < timeout);
139
140 return -ETIMEDOUT;
141}
142#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
143static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
144 struct iwl_priv *priv, u32 addr,
145 u32 bits, u32 mask, int timeout)
146{
147 int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
148 IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
149 addr, bits, mask,
150 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
151 return ret;
152}
153#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
154 __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
155 bits, mask, timeout)
156#else
157#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
158#endif
159
160static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
161{
162 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
163}
164#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
165static inline void __iwl_legacy_set_bit(const char *f, u32 l,
166 struct iwl_priv *priv, u32 reg, u32 mask)
167{
168 u32 val = _iwl_legacy_read32(priv, reg) | mask;
169 IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
170 mask, val);
171 _iwl_legacy_write32(priv, reg, val);
172}
173static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
174{
175 unsigned long reg_flags;
176
177 spin_lock_irqsave(&p->reg_lock, reg_flags);
178 __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
179 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
180}
181#else
182static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
183{
184 unsigned long reg_flags;
185
186 spin_lock_irqsave(&p->reg_lock, reg_flags);
187 _iwl_legacy_set_bit(p, r, m);
188 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
189}
190#endif
191
192static inline void
193_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
194{
195 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
196}
197#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
198static inline void
199__iwl_legacy_clear_bit(const char *f, u32 l,
200 struct iwl_priv *priv, u32 reg, u32 mask)
201{
202 u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
203 IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
204 _iwl_legacy_write32(priv, reg, val);
205}
206static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
207{
208 unsigned long reg_flags;
209
210 spin_lock_irqsave(&p->reg_lock, reg_flags);
211 __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
212 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
213}
214#else
215static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
216{
217 unsigned long reg_flags;
218
219 spin_lock_irqsave(&p->reg_lock, reg_flags);
220 _iwl_legacy_clear_bit(p, r, m);
221 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
222}
223#endif
224
225static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
226{
227 int ret;
228 u32 val;
229
230 /* this bit wakes up the NIC */
231 _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
232 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
233
234 /*
235 * These bits say the device is running, and should keep running for
236 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
237 * but they do not indicate that embedded SRAM is restored yet;
238 * 3945 and 4965 have volatile SRAM, and must save/restore contents
239 * to/from host DRAM when sleeping/waking for power-saving.
240 * Each direction takes approximately 1/4 millisecond; with this
241 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
242 * series of register accesses are expected (e.g. reading Event Log),
243 * to keep device from sleeping.
244 *
245 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
246 * SRAM is okay/restored. We don't check that here because this call
247 * is just for hardware register access; but GP1 MAC_SLEEP check is a
248 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
249 *
250 */
251 ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
252 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
253 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
254 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
255 if (ret < 0) {
256 val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
257 IWL_ERR(priv,
 258			"MAC is in deep sleep! CSR_GP_CNTRL = 0x%08X\n", val);
259 _iwl_legacy_write32(priv, CSR_RESET,
260 CSR_RESET_REG_FLAG_FORCE_NMI);
261 return -EIO;
262 }
263
264 return 0;
265}
266
267#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
268static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
269 struct iwl_priv *priv)
270{
271 IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
272 return _iwl_legacy_grab_nic_access(priv);
273}
274#define iwl_grab_nic_access(priv) \
275 __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
276#else
277#define iwl_grab_nic_access(priv) \
278 _iwl_legacy_grab_nic_access(priv)
279#endif
280
281static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
282{
283 _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
284 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
285}
286#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
287static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
288 struct iwl_priv *priv)
289{
290
291 IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
292 _iwl_legacy_release_nic_access(priv);
293}
294#define iwl_release_nic_access(priv) \
295 __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
296#else
297#define iwl_release_nic_access(priv) \
298 _iwl_legacy_release_nic_access(priv)
299#endif
300
301static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
302{
303 return _iwl_legacy_read32(priv, reg);
304}
305#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
306static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
307 struct iwl_priv *priv, u32 reg)
308{
309 u32 value = _iwl_legacy_read_direct32(priv, reg);
310 IWL_DEBUG_IO(priv,
311 "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
312 f, l);
313 return value;
314}
315static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
316{
317 u32 value;
318 unsigned long reg_flags;
319
320 spin_lock_irqsave(&priv->reg_lock, reg_flags);
321 iwl_grab_nic_access(priv);
322 value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
323 iwl_release_nic_access(priv);
324 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
325 return value;
326}
327
328#else
329static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
330{
331 u32 value;
332 unsigned long reg_flags;
333
334 spin_lock_irqsave(&priv->reg_lock, reg_flags);
335 iwl_grab_nic_access(priv);
336 value = _iwl_legacy_read_direct32(priv, reg);
337 iwl_release_nic_access(priv);
338 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
339 return value;
340
341}
342#endif
343
344static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
345 u32 reg, u32 value)
346{
347 _iwl_legacy_write32(priv, reg, value);
348}
349static inline void
350iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
351{
352 unsigned long reg_flags;
353
354 spin_lock_irqsave(&priv->reg_lock, reg_flags);
355 if (!iwl_grab_nic_access(priv)) {
356 _iwl_legacy_write_direct32(priv, reg, value);
357 iwl_release_nic_access(priv);
358 }
359 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
360}
361
362static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
363 u32 reg, u32 len, u32 *values)
364{
365 u32 count = sizeof(u32);
366
367 if ((priv != NULL) && (values != NULL)) {
368 for (; 0 < len; len -= count, reg += count, values++)
369 iwl_legacy_write_direct32(priv, reg, *values);
370 }
371}
372
373static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
374 u32 mask, int timeout)
375{
376 int t = 0;
377
378 do {
379 if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
380 return t;
381 udelay(IWL_POLL_INTERVAL);
382 t += IWL_POLL_INTERVAL;
383 } while (t < timeout);
384
385 return -ETIMEDOUT;
386}
387
388#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
389static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
390 struct iwl_priv *priv,
391 u32 addr, u32 mask, int timeout)
392{
393 int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
394
395 if (unlikely(ret == -ETIMEDOUT))
396 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
397 "timedout - %s %d\n", addr, mask, f, l);
398 else
399 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
400 "- %s %d\n", addr, mask, ret, f, l);
401 return ret;
402}
403#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
404__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
405#else
406#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
407#endif
408
409static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
410{
411 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
412 rmb();
413 return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
414}
415static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
416{
417 unsigned long reg_flags;
418 u32 val;
419
420 spin_lock_irqsave(&priv->reg_lock, reg_flags);
421 iwl_grab_nic_access(priv);
422 val = _iwl_legacy_read_prph(priv, reg);
423 iwl_release_nic_access(priv);
424 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
425 return val;
426}
427
428static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
429 u32 addr, u32 val)
430{
431 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
432 ((addr & 0x0000FFFF) | (3 << 24)));
433 wmb();
434 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
435}
436
437static inline void
438iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
439{
440 unsigned long reg_flags;
441
442 spin_lock_irqsave(&priv->reg_lock, reg_flags);
443 if (!iwl_grab_nic_access(priv)) {
444 _iwl_legacy_write_prph(priv, addr, val);
445 iwl_release_nic_access(priv);
446 }
447 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
448}
449
450#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
451_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
452
453static inline void
454iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
455{
456 unsigned long reg_flags;
457
458 spin_lock_irqsave(&priv->reg_lock, reg_flags);
459 iwl_grab_nic_access(priv);
460 _iwl_legacy_set_bits_prph(priv, reg, mask);
461 iwl_release_nic_access(priv);
462 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
463}
464
465#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
466_iwl_legacy_write_prph(priv, reg, \
467 ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
468
469static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
470 u32 bits, u32 mask)
471{
472 unsigned long reg_flags;
473
474 spin_lock_irqsave(&priv->reg_lock, reg_flags);
475 iwl_grab_nic_access(priv);
476 _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
477 iwl_release_nic_access(priv);
478 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
479}
480
481static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
482 *priv, u32 reg, u32 mask)
483{
484 unsigned long reg_flags;
485 u32 val;
486
487 spin_lock_irqsave(&priv->reg_lock, reg_flags);
488 iwl_grab_nic_access(priv);
489 val = _iwl_legacy_read_prph(priv, reg);
490 _iwl_legacy_write_prph(priv, reg, (val & ~mask));
491 iwl_release_nic_access(priv);
492 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
493}
494
495static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
496{
497 unsigned long reg_flags;
498 u32 value;
499
500 spin_lock_irqsave(&priv->reg_lock, reg_flags);
501 iwl_grab_nic_access(priv);
502
503 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
504 rmb();
505 value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
506
507 iwl_release_nic_access(priv);
508 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
509 return value;
510}
511
512static inline void
513iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
514{
515 unsigned long reg_flags;
516
517 spin_lock_irqsave(&priv->reg_lock, reg_flags);
518 if (!iwl_grab_nic_access(priv)) {
519 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
520 wmb();
521 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
522 iwl_release_nic_access(priv);
523 }
524 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
525}
526
527static inline void
528iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
529 u32 len, u32 *values)
530{
531 unsigned long reg_flags;
532
533 spin_lock_irqsave(&priv->reg_lock, reg_flags);
534 if (!iwl_grab_nic_access(priv)) {
535 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
536 wmb();
537 for (; 0 < len; len -= sizeof(u32), values++)
538 _iwl_legacy_write_direct32(priv,
539 HBUS_TARG_MEM_WDAT, *values);
540
541 iwl_release_nic_access(priv);
542 }
543 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
544}
545#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
new file mode 100644
index 00000000000..bda0d61b2c0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.c
@@ -0,0 +1,206 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44
45/* default: IWL_LED_BLINK(0) using blinking index table */
46static int led_mode;
47module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "0=system default, "
49 "1=On(RF On)/Off(RF Off), 2=blinking");
50
51/* Throughput OFF time(ms) ON time (ms)
52 * >300 25 25
53 * >200 to 300 40 40
54 * >100 to 200 55 55
55 * >70 to 100 65 65
56 * >50 to 70 75 75
57 * >20 to 50 85 85
58 * >10 to 20 95 95
59 * >5 to 10 110 110
60 * >1 to 5 130 130
61 * >0 to 1 167 167
62 * <=0 SOLID ON
63 */
64static const struct ieee80211_tpt_blink iwl_blink[] = {
65 { .throughput = 0, .blink_time = 334 },
66 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
67 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
68 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
69 { .throughput = 20 * 1024 - 1, .blink_time = 170 },
70 { .throughput = 50 * 1024 - 1, .blink_time = 150 },
71 { .throughput = 70 * 1024 - 1, .blink_time = 130 },
72 { .throughput = 100 * 1024 - 1, .blink_time = 110 },
73 { .throughput = 200 * 1024 - 1, .blink_time = 80 },
74 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
75};
76
77/*
 78 * Adjust the LED blink rate to compensate for the MAC clock deviation on each HW.
 79 * LED blink rate analysis showed an average deviation of 0% on 3945 and
 80 * 5% on 4965 HW.
 81 * The LED on/off times must be compensated per HW according to this deviation
 82 * in order to achieve the desired LED blink frequency.
 83 * The calculation is: (100 - averageDeviation) / 100 * blinkTime
 84 * For code efficiency the calculation is done as:
85 * compensation = (100 - averageDeviation) * 64 / 100
86 * NewBlinkTime = (compensation * BlinkTime) / 64
87 */
88static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
89 u8 time, u16 compensation)
90{
91 if (!compensation) {
92 IWL_ERR(priv, "undefined blink compensation: "
93 "use pre-defined blinking time\n");
94 return time;
95 }
96
97 return (u8)((time * compensation) >> 6);
98}
99
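/*
 * Worked example of the compensation above (illustrative numbers): with the
 * 4965's ~5% average deviation, led_compensation = (100 - 5) * 64 / 100 = 60,
 * so a requested 130 ms on-time is sent to the device as
 * (130 * 60) >> 6 = 121 ms.
 */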
100/* Set led pattern command */
101static int iwl_legacy_led_cmd(struct iwl_priv *priv,
102 unsigned long on,
103 unsigned long off)
104{
105 struct iwl_led_cmd led_cmd = {
106 .id = IWL_LED_LINK,
107 .interval = IWL_DEF_LED_INTRVL
108 };
109 int ret;
110
111 if (!test_bit(STATUS_READY, &priv->status))
112 return -EBUSY;
113
114 if (priv->blink_on == on && priv->blink_off == off)
115 return 0;
116
117 if (off == 0) {
118 /* led is SOLID_ON */
119 on = IWL_LED_SOLID;
120 }
121
122 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
123 priv->cfg->base_params->led_compensation);
124 led_cmd.on = iwl_legacy_blink_compensation(priv, on,
125 priv->cfg->base_params->led_compensation);
126 led_cmd.off = iwl_legacy_blink_compensation(priv, off,
127 priv->cfg->base_params->led_compensation);
128
129 ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
130 if (!ret) {
131 priv->blink_on = on;
132 priv->blink_off = off;
133 }
134 return ret;
135}
136
137static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
138 enum led_brightness brightness)
139{
140 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
141 unsigned long on = 0;
142
143 if (brightness > 0)
144 on = IWL_LED_SOLID;
145
146 iwl_legacy_led_cmd(priv, on, 0);
147}
148
149static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
150 unsigned long *delay_on,
151 unsigned long *delay_off)
152{
153 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
154
155 return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
156}
157
158void iwl_legacy_leds_init(struct iwl_priv *priv)
159{
160 int mode = led_mode;
161 int ret;
162
163 if (mode == IWL_LED_DEFAULT)
164 mode = priv->cfg->led_mode;
165
166 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
167 wiphy_name(priv->hw->wiphy));
168 priv->led.brightness_set = iwl_legacy_led_brightness_set;
169 priv->led.blink_set = iwl_legacy_led_blink_set;
170 priv->led.max_brightness = 1;
171
172 switch (mode) {
173 case IWL_LED_DEFAULT:
174 WARN_ON(1);
175 break;
176 case IWL_LED_BLINK:
177 priv->led.default_trigger =
178 ieee80211_create_tpt_led_trigger(priv->hw,
179 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
180 iwl_blink, ARRAY_SIZE(iwl_blink));
181 break;
182 case IWL_LED_RF_STATE:
183 priv->led.default_trigger =
184 ieee80211_get_radio_led_name(priv->hw);
185 break;
186 }
187
188 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
189 if (ret) {
190 kfree(priv->led.name);
191 return;
192 }
193
194 priv->led_registered = true;
195}
196EXPORT_SYMBOL(iwl_legacy_leds_init);
197
198void iwl_legacy_leds_exit(struct iwl_priv *priv)
199{
200 if (!priv->led_registered)
201 return;
202
203 led_classdev_unregister(&priv->led);
204 kfree(priv->led.name);
205}
206EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
new file mode 100644
index 00000000000..f0791f70f79
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.h
@@ -0,0 +1,56 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_leds_h__
28#define __iwl_legacy_leds_h__
29
30
31struct iwl_priv;
32
33#define IWL_LED_SOLID 11
34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
35
36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1)
38
39/*
40 * LED mode
41 * IWL_LED_DEFAULT: use device default
42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
43 * LED ON = RF ON
44 * LED OFF = RF OFF
45 * IWL_LED_BLINK: adjust led blink rate based on blink table
46 */
47enum iwl_led_mode {
48 IWL_LED_DEFAULT,
49 IWL_LED_RF_STATE,
50 IWL_LED_BLINK,
51};
52
53void iwl_legacy_leds_init(struct iwl_priv *priv);
54void iwl_legacy_leds_exit(struct iwl_priv *priv);
55
56#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
new file mode 100644
index 00000000000..38647e481eb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
@@ -0,0 +1,456 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_rs_h__
28#define __iwl_legacy_rs_h__
29
30struct iwl_rate_info {
31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
32 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
33 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
34 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
35 u8 prev_ieee; /* previous rate in IEEE speeds */
36 u8 next_ieee; /* next rate in IEEE speeds */
37 u8 prev_rs; /* previous rate used in rs algo */
38 u8 next_rs; /* next rate used in rs algo */
39 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
40 u8 next_rs_tgg; /* next rate used in TGG rs algo */
41};
42
43struct iwl3945_rate_info {
44 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
45 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
46 u8 prev_ieee; /* previous rate in IEEE speeds */
47 u8 next_ieee; /* next rate in IEEE speeds */
48 u8 prev_rs; /* previous rate used in rs algo */
49 u8 next_rs; /* next rate used in rs algo */
50 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
51 u8 next_rs_tgg; /* next rate used in TGG rs algo */
52 u8 table_rs_index; /* index in rate scale table cmd */
53 u8 prev_table_rs; /* prev in rate table cmd */
54};
55
56
57/*
58 * These serve as indexes into
59 * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
60 */
61enum {
62 IWL_RATE_1M_INDEX = 0,
63 IWL_RATE_2M_INDEX,
64 IWL_RATE_5M_INDEX,
65 IWL_RATE_11M_INDEX,
66 IWL_RATE_6M_INDEX,
67 IWL_RATE_9M_INDEX,
68 IWL_RATE_12M_INDEX,
69 IWL_RATE_18M_INDEX,
70 IWL_RATE_24M_INDEX,
71 IWL_RATE_36M_INDEX,
72 IWL_RATE_48M_INDEX,
73 IWL_RATE_54M_INDEX,
74 IWL_RATE_60M_INDEX,
75 IWL_RATE_COUNT,
76 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
77 IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
78 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
79 IWL_RATE_INVALID = IWL_RATE_COUNT,
80};
81
82enum {
83 IWL_RATE_6M_INDEX_TABLE = 0,
84 IWL_RATE_9M_INDEX_TABLE,
85 IWL_RATE_12M_INDEX_TABLE,
86 IWL_RATE_18M_INDEX_TABLE,
87 IWL_RATE_24M_INDEX_TABLE,
88 IWL_RATE_36M_INDEX_TABLE,
89 IWL_RATE_48M_INDEX_TABLE,
90 IWL_RATE_54M_INDEX_TABLE,
91 IWL_RATE_1M_INDEX_TABLE,
92 IWL_RATE_2M_INDEX_TABLE,
93 IWL_RATE_5M_INDEX_TABLE,
94 IWL_RATE_11M_INDEX_TABLE,
95 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
96};
97
98enum {
99 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
100 IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
101 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
102 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
103 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
104};
105
106/* #define vs. enum to keep from defaulting to 'large integer' */
107#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
108#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
109#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
110#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
111#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
112#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
113#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
114#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
115#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
116#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
117#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
118#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
119#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
120
121/* uCode API values for legacy bit rates, both OFDM and CCK */
122enum {
123 IWL_RATE_6M_PLCP = 13,
124 IWL_RATE_9M_PLCP = 15,
125 IWL_RATE_12M_PLCP = 5,
126 IWL_RATE_18M_PLCP = 7,
127 IWL_RATE_24M_PLCP = 9,
128 IWL_RATE_36M_PLCP = 11,
129 IWL_RATE_48M_PLCP = 1,
130 IWL_RATE_54M_PLCP = 3,
131 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
132 IWL_RATE_1M_PLCP = 10,
133 IWL_RATE_2M_PLCP = 20,
134 IWL_RATE_5M_PLCP = 55,
135 IWL_RATE_11M_PLCP = 110,
136 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
137};
138
139/* uCode API values for OFDM high-throughput (HT) bit rates */
140enum {
141 IWL_RATE_SISO_6M_PLCP = 0,
142 IWL_RATE_SISO_12M_PLCP = 1,
143 IWL_RATE_SISO_18M_PLCP = 2,
144 IWL_RATE_SISO_24M_PLCP = 3,
145 IWL_RATE_SISO_36M_PLCP = 4,
146 IWL_RATE_SISO_48M_PLCP = 5,
147 IWL_RATE_SISO_54M_PLCP = 6,
148 IWL_RATE_SISO_60M_PLCP = 7,
149 IWL_RATE_MIMO2_6M_PLCP = 0x8,
150 IWL_RATE_MIMO2_12M_PLCP = 0x9,
151 IWL_RATE_MIMO2_18M_PLCP = 0xa,
152 IWL_RATE_MIMO2_24M_PLCP = 0xb,
153 IWL_RATE_MIMO2_36M_PLCP = 0xc,
154 IWL_RATE_MIMO2_48M_PLCP = 0xd,
155 IWL_RATE_MIMO2_54M_PLCP = 0xe,
156 IWL_RATE_MIMO2_60M_PLCP = 0xf,
157 IWL_RATE_SISO_INVM_PLCP,
158 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
159};
160
161/* MAC header values for bit rates */
162enum {
163 IWL_RATE_6M_IEEE = 12,
164 IWL_RATE_9M_IEEE = 18,
165 IWL_RATE_12M_IEEE = 24,
166 IWL_RATE_18M_IEEE = 36,
167 IWL_RATE_24M_IEEE = 48,
168 IWL_RATE_36M_IEEE = 72,
169 IWL_RATE_48M_IEEE = 96,
170 IWL_RATE_54M_IEEE = 108,
171 IWL_RATE_60M_IEEE = 120,
172 IWL_RATE_1M_IEEE = 2,
173 IWL_RATE_2M_IEEE = 4,
174 IWL_RATE_5M_IEEE = 11,
175 IWL_RATE_11M_IEEE = 22,
176};
177
178#define IWL_CCK_BASIC_RATES_MASK \
179 (IWL_RATE_1M_MASK | \
180 IWL_RATE_2M_MASK)
181
182#define IWL_CCK_RATES_MASK \
183 (IWL_CCK_BASIC_RATES_MASK | \
184 IWL_RATE_5M_MASK | \
185 IWL_RATE_11M_MASK)
186
187#define IWL_OFDM_BASIC_RATES_MASK \
188 (IWL_RATE_6M_MASK | \
189 IWL_RATE_12M_MASK | \
190 IWL_RATE_24M_MASK)
191
192#define IWL_OFDM_RATES_MASK \
193 (IWL_OFDM_BASIC_RATES_MASK | \
194 IWL_RATE_9M_MASK | \
195 IWL_RATE_18M_MASK | \
196 IWL_RATE_36M_MASK | \
197 IWL_RATE_48M_MASK | \
198 IWL_RATE_54M_MASK)
199
200#define IWL_BASIC_RATES_MASK \
201 (IWL_OFDM_BASIC_RATES_MASK | \
202 IWL_CCK_BASIC_RATES_MASK)
203
204#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
205#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
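/*
 * With IWL_RATE_COUNT = 13 (indexes 0..12) the masks above evaluate to
 * IWL_RATES_MASK = 0x1FFF and IWL_RATES_MASK_3945 = 0x0FFF; the CCK rates
 * occupy bits 0-3 (IWL_CCK_RATES_MASK = 0x000F), the 802.11a/g OFDM rates
 * bits 4-11 (IWL_OFDM_RATES_MASK = 0x0FF0), and 60M sits alone at bit 12.
 */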
206
207#define IWL_INVALID_VALUE -1
208
209#define IWL_MIN_RSSI_VAL -100
210#define IWL_MAX_RSSI_VAL 0
211
212/* These values specify how many Tx frame attempts before
213 * searching for a new modulation mode */
214#define IWL_LEGACY_FAILURE_LIMIT 160
215#define IWL_LEGACY_SUCCESS_LIMIT 480
216#define IWL_LEGACY_TABLE_COUNT 160
217
218#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
219#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
220#define IWL_NONE_LEGACY_TABLE_COUNT 1500
221
222/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
223#define IWL_RS_GOOD_RATIO 12800 /* 100% */
224#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
225#define IWL_RATE_HIGH_TH 10880 /* 85% */
226#define IWL_RATE_INCREASE_TH 6400 /* 50% */
227#define IWL_RATE_DECREASE_TH 1920 /* 15% */
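/*
 * Worked example of the "per-cent * 128" scaling used above (and in
 * struct iwl_rate_scale_data below): 54 ACKed frames out of 64 attempts
 * is 84.375%, i.e. a success ratio of 128 * 100 * 54 / 64 = 10800, just
 * under IWL_RATE_HIGH_TH / IWL_RATE_SCALE_SWITCH (10880 = 85%).
 */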
228
229/* possible actions when in legacy mode */
230#define IWL_LEGACY_SWITCH_ANTENNA1 0
231#define IWL_LEGACY_SWITCH_ANTENNA2 1
232#define IWL_LEGACY_SWITCH_SISO 2
233#define IWL_LEGACY_SWITCH_MIMO2_AB 3
234#define IWL_LEGACY_SWITCH_MIMO2_AC 4
235#define IWL_LEGACY_SWITCH_MIMO2_BC 5
236
237/* possible actions when in siso mode */
238#define IWL_SISO_SWITCH_ANTENNA1 0
239#define IWL_SISO_SWITCH_ANTENNA2 1
240#define IWL_SISO_SWITCH_MIMO2_AB 2
241#define IWL_SISO_SWITCH_MIMO2_AC 3
242#define IWL_SISO_SWITCH_MIMO2_BC 4
243#define IWL_SISO_SWITCH_GI 5
244
245/* possible actions when in mimo mode */
246#define IWL_MIMO2_SWITCH_ANTENNA1 0
247#define IWL_MIMO2_SWITCH_ANTENNA2 1
248#define IWL_MIMO2_SWITCH_SISO_A 2
249#define IWL_MIMO2_SWITCH_SISO_B 3
250#define IWL_MIMO2_SWITCH_SISO_C 4
251#define IWL_MIMO2_SWITCH_GI 5
252
253#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
254
255#define IWL_ACTION_LIMIT 3 /* # possible actions */
256
257#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
258
259/* load per tid defines for A-MPDU activation */
260#define IWL_AGG_TPT_THREHOLD 0
261#define IWL_AGG_LOAD_THRESHOLD 10
262#define IWL_AGG_ALL_TID 0xff
263#define TID_QUEUE_CELL_SPACING 50 /*mS */
264#define TID_QUEUE_MAX_SIZE 20
265#define TID_ROUND_VALUE 5 /* mS */
266#define TID_MAX_LOAD_COUNT 8
267
268#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
269#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
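/*
 * With the defaults above the per-TID load history spans
 * TID_MAX_TIME_DIFF = (20 - 1) * 50 = 950 ms, split into
 * TID_QUEUE_MAX_SIZE cells of TID_QUEUE_CELL_SPACING ms each;
 * TIME_WRAP_AROUND() returns y - x while tolerating a counter that has
 * wrapped past zero.
 */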
270
271extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
272
273enum iwl_table_type {
274 LQ_NONE,
275 LQ_G, /* legacy types */
276 LQ_A,
277 LQ_SISO, /* high-throughput types */
278 LQ_MIMO2,
279 LQ_MAX,
280};
281
282#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
283#define is_siso(tbl) ((tbl) == LQ_SISO)
284#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
285#define is_mimo(tbl) (is_mimo2(tbl))
286#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
287#define is_a_band(tbl) ((tbl) == LQ_A)
288#define is_g_and(tbl) ((tbl) == LQ_G)
289
290#define ANT_NONE 0x0
291#define ANT_A BIT(0)
292#define ANT_B BIT(1)
293#define ANT_AB (ANT_A | ANT_B)
294#define ANT_C BIT(2)
295#define ANT_AC (ANT_A | ANT_C)
296#define ANT_BC (ANT_B | ANT_C)
297#define ANT_ABC (ANT_AB | ANT_C)
298
299#define IWL_MAX_MCS_DISPLAY_SIZE 12
300
301struct iwl_rate_mcs_info {
302 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
303 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
304};
305
306/**
307 * struct iwl_rate_scale_data -- tx success history for one rate
308 */
309struct iwl_rate_scale_data {
310 u64 data; /* bitmap of successful frames */
311 s32 success_counter; /* number of frames successful */
312 s32 success_ratio; /* per-cent * 128 */
313 s32 counter; /* number of frames attempted */
314 s32 average_tpt; /* success ratio * expected throughput */
315 unsigned long stamp;
316};
317
318/**
319 * struct iwl_scale_tbl_info -- tx params and success history for all rates
320 *
321 * There are two of these in struct iwl_lq_sta,
322 * one for "active", and one for "search".
323 */
324struct iwl_scale_tbl_info {
325 enum iwl_table_type lq_type;
326 u8 ant_type;
327 u8 is_SGI; /* 1 = short guard interval */
328 u8 is_ht40; /* 1 = 40 MHz channel width */
329 u8 is_dup; /* 1 = duplicated data streams */
330 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
331	u8 max_search;	/* maximum number of tables we can search */
332 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
333 u32 current_rate; /* rate_n_flags, uCode API format */
334 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
335};
336
337struct iwl_traffic_load {
338 unsigned long time_stamp; /* age of the oldest statistics */
339 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
340 * slice */
341 u32 total; /* total num of packets during the
342 * last TID_MAX_TIME_DIFF */
343	u8 queue_count;	  /* number of queues that have
344				   * been used since the last cleanup */
345 u8 head; /* start of the circular buffer */
346};
347
348/**
349 * struct iwl_lq_sta -- driver's rate scaling private structure
350 *
351 * Pointer to this gets passed back and forth between driver and mac80211.
352 */
353struct iwl_lq_sta {
354 u8 active_tbl; /* index of active table, range 0-1 */
355 u8 enable_counter; /* indicates HT mode */
356 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
357 u8 search_better_tbl; /* 1: currently trying alternate mode */
358 s32 last_tpt;
359
360 /* The following determine when to search for a new mode */
361 u32 table_count_limit;
362 u32 max_failure_limit; /* # failed frames before new search */
363 u32 max_success_limit; /* # successful frames before new search */
364 u32 table_count;
365 u32 total_failed; /* total failed frames, any/all rates */
366 u32 total_success; /* total successful frames, any/all rates */
367 u64 flush_timer; /* time staying in mode before new search */
368
369 u8 action_counter; /* # mode-switch actions tried */
370 u8 is_green;
371 u8 is_dup;
372 enum ieee80211_band band;
373
374 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
375 u32 supp_rates;
376 u16 active_legacy_rate;
377 u16 active_siso_rate;
378 u16 active_mimo2_rate;
379 s8 max_rate_idx; /* Max rate set by user */
380 u8 missed_rate_counter;
381
382 struct iwl_link_quality_cmd lq;
383 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
384 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
385 u8 tx_agg_tid_en;
386#ifdef CONFIG_MAC80211_DEBUGFS
387 struct dentry *rs_sta_dbgfs_scale_table_file;
388 struct dentry *rs_sta_dbgfs_stats_table_file;
389 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
390 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
391 u32 dbg_fixed_rate;
392#endif
393 struct iwl_priv *drv;
394
395 /* used to be in sta_info */
396 int last_txrate_idx;
397 /* last tx rate_n_flags */
398 u32 last_rate_n_flags;
399 /* packets destined for this STA are aggregated */
400 u8 is_agg;
401};
402
403static inline u8 iwl4965_num_of_ant(u8 mask)
404{
405 return !!((mask) & ANT_A) +
406 !!((mask) & ANT_B) +
407 !!((mask) & ANT_C);
408}
409
410static inline u8 iwl4965_first_antenna(u8 mask)
411{
412 if (mask & ANT_A)
413 return ANT_A;
414 if (mask & ANT_B)
415 return ANT_B;
416 return ANT_C;
417}
418
419
420/**
421 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
422 *
423 * The specific throughput table used is based on the type of network the
424 * station is associated with, including A, B, G, and G w/ TGG protection
425 */
426extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
427
428/* Initialize station's rate scaling information after adding station */
429extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
430 struct ieee80211_sta *sta, u8 sta_id);
431extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
432 struct ieee80211_sta *sta, u8 sta_id);
433
434/**
435 * iwl_rate_control_register - Register the rate control algorithm callbacks
436 *
437 * Since the rate control algorithm is hardware specific, there is no need
438 * or reason to place it as a stand alone module. The driver can call
439 * iwl_rate_control_register in order to register the rate control callbacks
440 * with the mac80211 subsystem. This should be performed prior to calling
441 * ieee80211_register_hw
442 *
443 */
444extern int iwl4965_rate_control_register(void);
445extern int iwl3945_rate_control_register(void);
446
447/**
448 * iwl_rate_control_unregister - Unregister the rate control callbacks
449 *
450 * This should be called after calling ieee80211_unregister_hw, but before
451 * the driver is unloaded.
452 */
453extern void iwl4965_rate_control_unregister(void);
454extern void iwl3945_rate_control_unregister(void);
455
456#endif /* __iwl_legacy_rs__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
new file mode 100644
index 00000000000..903ef0d6d6c
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.c
@@ -0,0 +1,165 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-power.h"
44
45/*
46 * Setting power level allows the card to go to sleep when not busy.
47 *
48 * We calculate a sleep command based on the required latency, which
49 * we get from mac80211. In order to handle thermal throttling, we can
50 * also use pre-defined power levels.
51 */
52
53/*
54 * This defines the old power levels. They are still used by default
55 * (level 1) and for thermal throttle (levels 3 through 5)
56 */
57
58struct iwl_power_vec_entry {
59 struct iwl_powertable_cmd cmd;
60 u8 no_dtim; /* number of skip dtim */
61};
62
63static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
64 struct iwl_powertable_cmd *cmd)
65{
66 memset(cmd, 0, sizeof(*cmd));
67
68 if (priv->power_data.pci_pm)
69 cmd->flags |= IWL_POWER_PCI_PM_MSK;
70
71 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
72}
73
74static int
75iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
76{
77 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
78 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
79 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
80 le32_to_cpu(cmd->tx_data_timeout));
81 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
82 le32_to_cpu(cmd->rx_data_timeout));
83 IWL_DEBUG_POWER(priv,
84 "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
85 le32_to_cpu(cmd->sleep_interval[0]),
86 le32_to_cpu(cmd->sleep_interval[1]),
87 le32_to_cpu(cmd->sleep_interval[2]),
88 le32_to_cpu(cmd->sleep_interval[3]),
89 le32_to_cpu(cmd->sleep_interval[4]));
90
91 return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
92 sizeof(struct iwl_powertable_cmd), cmd);
93}
94
95int
96iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
97 bool force)
98{
99 int ret;
100 bool update_chains;
101
102 lockdep_assert_held(&priv->mutex);
103
104 /* Don't update the RX chain when chain noise calibration is running */
105 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
106 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
107
108 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
109 return 0;
110
111 if (!iwl_legacy_is_ready_rf(priv))
112 return -EIO;
113
114 /* scan complete use sleep_power_next, need to be updated */
115 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
116 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
117 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
118 return 0;
119 }
120
121 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
122 set_bit(STATUS_POWER_PMI, &priv->status);
123
124 ret = iwl_legacy_set_power(priv, cmd);
125 if (!ret) {
126 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
127 clear_bit(STATUS_POWER_PMI, &priv->status);
128
129 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
130 priv->cfg->ops->lib->update_chain_flags(priv);
131 else if (priv->cfg->ops->lib->update_chain_flags)
132 IWL_DEBUG_POWER(priv,
133 "Cannot update the power, chain noise "
134 "calibration running: %d\n",
135 priv->chain_noise_data.state);
136
137 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
138 } else
139 IWL_ERR(priv, "set power fail, ret = %d", ret);
140
141 return ret;
142}
143
144int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
145{
146 struct iwl_powertable_cmd cmd;
147
148 iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
149 return iwl_legacy_power_set_mode(priv, &cmd, force);
150}
151EXPORT_SYMBOL(iwl_legacy_power_update_mode);
152
153/* initialize to default */
154void iwl_legacy_power_initialize(struct iwl_priv *priv)
155{
156 u16 lctl = iwl_legacy_pcie_link_ctl(priv);
157
158 priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
159
160 priv->power_data.debug_sleep_level_override = -1;
161
162 memset(&priv->power_data.sleep_cmd, 0,
163 sizeof(priv->power_data.sleep_cmd));
164}
165EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
new file mode 100644
index 00000000000..d30b36acdc4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.h
@@ -0,0 +1,55 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_legacy_power_setting_h__
29#define __iwl_legacy_power_setting_h__
30
31#include "iwl-commands.h"
32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
45 int debug_sleep_level_override;
46 bool pci_pm;
47};
48
49int
50iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
51 bool force);
52int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
53void iwl_legacy_power_initialize(struct iwl_priv *priv);
54
55#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h
new file mode 100644
index 00000000000..30a493003ab
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-prph.h
@@ -0,0 +1,523 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_prph_h__
64#define __iwl_legacy_prph_h__
65
66/*
67 * Registers in this file are internal, not PCI bus memory mapped.
68 * Driver accesses these via HBUS_TARG_PRPH_* registers.
69 */
70#define PRPH_BASE (0x00000)
71#define PRPH_END (0xFFFFF)
72
73/* APMG (power management) constants */
74#define APMG_BASE (PRPH_BASE + 0x3000)
75#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000)
76#define APMG_CLK_EN_REG (APMG_BASE + 0x0004)
77#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008)
78#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c)
79#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010)
80#define APMG_RFKILL_REG (APMG_BASE + 0x0014)
81#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c)
82#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020)
83#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
84#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
85
86#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
87#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
88#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
89
90#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
98
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
100
101/**
102 * BSM (Bootstrap State Machine)
103 *
104 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
105 * in special SRAM that does not power down when the embedded control
106 * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
107 *
108 * When powering back up after sleeps (or during initial uCode load), the BSM
109 * internally loads the short bootstrap program from the special SRAM into the
110 * embedded processor's instruction SRAM, and starts the processor so it runs
111 * the bootstrap program.
112 *
113 * This bootstrap program loads (via PCI busmaster DMA) instructions and data
114 * images for a uCode program from host DRAM locations. The host driver
115 * indicates DRAM locations and sizes for instruction and data images via the
116 * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
117 * the new program starts automatically.
118 *
119 * The uCode used for open-source drivers includes two programs:
120 *
121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification
123 * (struct iwl_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver.
127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct iwl_alive_resp) that it
130 * is ready to be used.
131 *
132 * When initializing the NIC, the host driver does the following procedure:
133 *
134 * 1) Load bootstrap program (instructions only, no data image for bootstrap)
135 * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
136 *
137 * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
138 * images in host DRAM.
139 *
140 * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
141 * BSM_WR_MEM_SRC_REG = 0
142 * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
143 *      BSM_WR_DWCOUNT_REG = # dwords in bootstrap instruction image
144 *
145 * 4) Load bootstrap into instruction SRAM:
146 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
147 *
148 * 5) Wait for load completion:
149 * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
150 *
151 * 6) Enable future boot loads whenever NIC's power management triggers it:
152 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
153 *
154 * 7) Start the NIC by removing all reset bits:
155 * CSR_RESET = 0
156 *
157 * The bootstrap uCode (already in instruction SRAM) loads initialization
158 * uCode. Initialization uCode performs data initialization, sends
159 * "initialize alive" notification to host, and waits for a signal from
160 * host to load runtime code.
161 *
162 * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
163 * images in host DRAM. The last register loaded must be the instruction
164 * byte count register ("1" in MSbit tells initialization uCode to load
165 * the runtime uCode):
166 * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
167 *
168 * 5) Wait for "alive" notification, then issue normal runtime commands.
169 *
170 * Data caching during power-downs:
171 *
172 * Just before the embedded controller powers down (e.g for automatic
173 * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
174 * a current snapshot of the embedded processor's data SRAM into host DRAM.
175 * This caches the data while the embedded processor's memory is powered down.
176 * Location and size are controlled by BSM_DRAM_DATA_* registers.
177 *
178 * NOTE: Instruction SRAM does not need to be saved, since that doesn't
179 * change during operation; the original image (from uCode distribution
180 * file) can be used for reload.
181 *
182 * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
183 * at the BSM_DRAM_* registers, which now point to the runtime instruction
184 * image and the cached (modified) runtime data (*not* the initialization
185 * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
186 * uCode from where it left off before the power-down.
187 *
188 * NOTE: Initialization uCode does *not* run as part of the save/restore
189 * procedure.
190 *
191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory).
195 *
196 * Note that, during normal operation, the host DRAM that held the initial
197 * startup data for the runtime code is now being used as a backup data cache
198 * for modified data! If you need to completely re-initialize the NIC, make
199 * sure that you use the runtime data image from the uCode distribution file,
200 * not the modified/saved runtime data. You may want to store a separate
201 * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
202 */
203
204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208
209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800)
212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218
219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
221 * NOTE: 3945 pointers use bits 31:0 of DRAM address.
222 * 4965 pointers use bits 35:4 of DRAM address.
223 */
224#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
225#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
226#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
227#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
228
229/*
230 * BSM special memory, stays powered on during power-save sleeps.
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */
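/*
 * Minimal sketch of steps 3)-6) of the bootstrap load described above,
 * for illustration only.  It assumes the prph accessors
 * iwl_legacy_write_prph()/iwl_legacy_read_prph() from iwl-io.h, udelay()
 * from <linux/delay.h>, and RTC_INST_LOWER_BOUND from the per-HW header,
 * and that the bootstrap image has already been written into BSM SRAM
 * (step 1).
 */
static int iwl_legacy_bsm_copy_sketch(struct iwl_priv *priv, u32 inst_dwords)
{
	int i;

	/* 3) Source, destination and size for the BSM copy engine */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, inst_dwords);

	/* 4) Load bootstrap into instruction SRAM */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* 5) Wait for load completion: the START bit clears when done */
	for (i = 0; i < 100; i++) {
		if (!(iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG) &
		      BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i == 100)
		return -EIO;

	/* 6) Re-arm the BSM so power management can reload bootstrap later */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
			      BSM_WR_CTRL_REG_BIT_START_EN);
	return 0;
}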
235
236
237/* 3945 Tx scheduler registers */
238#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
239#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000)
240#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004)
241#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010)
242#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014)
243#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020)
244#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
245#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
246
247/**
248 * Tx Scheduler
249 *
250 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
251 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
252 * host DRAM. It steers each frame's Tx command (which contains the frame
253 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
254 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
255 * but one DMA channel may take input from several queues.
256 *
257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c):
259 *
260 * 0 -- EDCA BK (background) frames, lowest priority
261 * 1 -- EDCA BE (best effort) frames, normal priority
262 * 2 -- EDCA VI (video) frames, higher priority
263 * 3 -- EDCA VO (voice) and management frames, highest priority
264 * 4 -- Commands (e.g. RXON, etc.)
265 * 5 -- unused (HCCA)
266 * 6 -- unused (HCCA)
267 * 7 -- not used by driver (device-internal only)
268 *
269 *
270 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
271 * In addition, driver can map the remaining queues to Tx DMA/FIFO
272 * channels 0-3 to support 11n aggregation via EDCA DMA channels.
273 *
274 * The driver sets up each queue to work in one of two modes:
275 *
276 * 1) Scheduler-Ack, in which the scheduler automatically supports a
277 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
278 * contains TFDs for a unique combination of Recipient Address (RA)
279 * and Traffic Identifier (TID), that is, traffic of a given
280 * Quality-Of-Service (QOS) priority, destined for a single station.
281 *
282 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
283 * each frame within the BA window, including whether it's been transmitted,
284 * and whether it's been acknowledged by the receiving station. The device
285 * automatically processes block-acks received from the receiving STA,
286 * and reschedules un-acked frames to be retransmitted (successful
287 * Tx completion may end up being out-of-order).
288 *
289 * The driver must maintain the queue's Byte Count table in host DRAM
290 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
291 * This mode does not support fragmentation.
292 *
293 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
294 * The device may automatically retry Tx, but will retry only one frame
295 * at a time, until receiving ACK from receiving station, or reaching
296 * retry limit and giving up.
297 *
298 * The command queue (#4/#9) must use this mode!
299 * This mode does not require use of the Byte Count table in host DRAM.
300 *
301 * Driver controls scheduler operation via 3 means:
302 * 1) Scheduler registers
303 * 2) Shared scheduler database in internal 4965 SRAM
304 * 3) Shared data in host DRAM
305 *
306 * Initialization:
307 *
308 * When loading, driver should allocate memory for:
309 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
310 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
311 * (1024 bytes for each queue).
312 *
313 * After receiving "Alive" response from uCode, driver must initialize
314 * the scheduler (especially for queue #4/#9, the command queue, otherwise
315 * the driver can't issue commands!):
316 */
317
318/**
319 * Max Tx window size is the max number of contiguous TFDs that the scheduler
320 * can keep track of at one time when creating block-ack chains of frames.
321 * Note that "64" matches the number of ack bits in a block-ack packet.
322 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
323 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
324 */
325#define SCD_WIN_SIZE 64
326#define SCD_FRAME_LIMIT 64
327
328/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
329#define IWL49_SCD_START_OFFSET 0xa02c00
330
331/*
332 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
333 * Value is valid only after "Alive" response from uCode.
334 */
335#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
336
337/*
338 * Driver may need to update queue-empty bits after changing queue's
339 * write and read pointers (indexes) during (re-)initialization (i.e. when
340 * scheduler is not tracking what's happening).
341 * Bit fields:
342 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
343 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
344 * NOTE: This register is not used by Linux driver.
345 */
346#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
347
348/*
349 * Physical base address of array of byte count (BC) circular buffers (CBs).
350 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
351 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
352 * Others are spaced by 1024 bytes.
353 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
354 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
355 * Bit fields:
356 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
357 */
358#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
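/*
 * Layout check for the above: 2 bytes * (256 + 64) = 640 bytes of byte
 * counts plus 384 bytes of pad gives the 1024-byte per-queue spacing, and
 * 16 such CBs make up the 16 KBytes contiguous block mentioned in the
 * scheduler init notes earlier in this file.
 */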
359
360/*
361 * Enables any/all Tx DMA/FIFO channels.
362 * Scheduler generates requests for only the active channels.
363 * Set this to 0xff to enable all 8 channels (normal usage).
364 * Bit fields:
365 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
366 */
367#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
368/*
369 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
370 * Initialized and updated by driver as new TFDs are added to queue.
371 * NOTE: If using Block Ack, index must correspond to frame's
372 * Start Sequence Number; index = (SSN & 0xff)
373 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
374 */
375#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
376
377/*
378 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
379 * For FIFO mode, index indicates next frame to transmit.
380 * For Scheduler-ACK mode, index indicates first frame in Tx window.
381 * Initialized by driver, updated by scheduler.
382 */
383#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
384
385/*
386 * Select which queues work in chain mode (1) vs. not (0).
387 * Use chain mode to build chains of aggregated frames.
388 * Bit fields:
389 * 31-16: Reserved
390 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
391 * NOTE:  If driver sets up a queue for chain mode, it should also set up
392 *        Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
393 */
394#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
395
396/*
397 * Select which queues interrupt driver when scheduler increments
398 * a queue's read pointer (index).
399 * Bit fields:
400 * 31-16: Reserved
401 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
402 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
403 * from Rx queue to read Tx command responses and update Tx queues.
404 */
405#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
406
407/*
408 * Queue search status registers. One for each queue.
409 * Sets up queue mode and assigns queue to Tx DMA channel.
410 * Bit fields:
411 * 19-10: Write mask/enable bits for bits 0-9
412 * 9: Driver should init to "0"
413 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
414 * Driver should init to "1" for aggregation mode, or "0" otherwise.
415 * 7-6: Driver should init to "0"
416 * 5: Window Size Left; indicates whether scheduler can request
417 * another TFD, based on window size, etc. Driver should init
418 * this bit to "1" for aggregation mode, or "0" for non-agg.
419 * 4-1: Tx FIFO to use (range 0-7).
420 * 0: Queue is active (1), not active (0).
421 * Other bits should be written as "0"
422 *
423 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
424 * via SCD_QUEUECHAIN_SEL.
425 */
426#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
427 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
428
429/* Bit field positions */
430#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
431#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
432#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
433#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
434
435/* Write masks */
436#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
437#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
438
439/**
440 * 4965 internal SRAM structures for scheduler, shared with driver ...
441 *
442 * Driver should clear and initialize the following areas after receiving
443 * "Alive" response from 4965 uCode, i.e. after initial
444 * uCode load, or after a uCode load done for error recovery:
445 *
446 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
447 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
448 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
449 *
450 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
451 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
452 * All OFFSET values must be added to this base address.
453 */
454
455/*
456 * Queue context. One 8-byte entry for each of 16 queues.
457 *
458 * Driver should clear this entire area (size 0x80) to 0 after receiving
459 * "Alive" notification from uCode. Additionally, driver should init
460 * each queue's entry as follows:
461 *
462 * LS Dword bit fields:
463 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
464 *
465 * MS Dword bit fields:
466 * 16-22: Frame limit. Driver should init to 10 (0xa).
467 *
468 * Driver should init all other bits to 0.
469 *
470 * Init must be done after driver receives "Alive" response from 4965 uCode,
471 * and when setting up queue for aggregation.
472 */
473#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
474#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
475 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
476
477#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
478#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
479#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
480#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
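/*
 * Illustrative sketch of the per-queue context init described above, using
 * the SCD_WIN_SIZE/SCD_FRAME_LIMIT values suggested earlier in this file.
 * It assumes iwl_legacy_write_targ_mem() from iwl-io.h and that
 * priv->scd_base_addr was read from IWL49_SCD_SRAM_BASE_ADDR after the
 * "Alive" response.
 */
static void iwl_legacy_scd_queue_ctx_sketch(struct iwl_priv *priv, int txq_id)
{
	u32 addr = priv->scd_base_addr +
		   IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);

	/* LS dword: max Tx window size for Scheduler-ACK */
	iwl_legacy_write_targ_mem(priv, addr,
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	/* MS dword: frame limit */
	iwl_legacy_write_targ_mem(priv, addr + sizeof(u32),
		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}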
481
482/*
483 * Tx Status Bitmap
484 *
485 * Driver should clear this entire area (size 0x100) to 0 after receiving
486 * "Alive" notification from uCode. Area is used only by device itself;
487 * no other support (besides clearing) is required from driver.
488 */
489#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
490
491/*
492 * RAxTID to queue translation mapping.
493 *
494 * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
495 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
496 * one QOS priority level destined for one station (for this wireless link,
497 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
498 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
499 * mode, the device ignores the mapping value.
500 *
501 * Bit fields, for each 16-bit map:
502 * 15-9: Reserved, set to 0
503 * 8-4: Index into device's station table for recipient station
504 * 3-0: Traffic ID (tid), range 0-15
505 *
506 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
507 * "Alive" notification from uCode. To update a 16-bit map value, driver
508 * must read a dword-aligned value from device SRAM, replace the 16-bit map
509 * value of interest, and write the dword value back into device SRAM.
510 */
511#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
512
513/* Find translation table dword to read/write for given queue */
514#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
515 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
516
517#define IWL_SCD_TXFIFO_POS_TID (0)
518#define IWL_SCD_TXFIFO_POS_RA (4)
519#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
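/*
 * Sketch of the dword read-modify-write described above for updating one
 * queue's 16-bit RAxTID map entry; illustrative only.  Assumes
 * iwl_legacy_read_targ_mem()/iwl_legacy_write_targ_mem() from iwl-io.h,
 * priv->scd_base_addr as above, and ra_tid built as
 * (sta_id << IWL_SCD_TXFIFO_POS_RA) | tid.
 */
static void iwl_legacy_scd_set_ratid_sketch(struct iwl_priv *priv,
					    u16 ra_tid, u16 txq_id)
{
	u32 addr = priv->scd_base_addr +
		   IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	u32 dw = iwl_legacy_read_targ_mem(priv, addr);
	u16 map = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	/* odd queues use the upper 16 bits of the dword, even the lower */
	if (txq_id & 0x1)
		dw = (map << 16) | (dw & 0x0000FFFF);
	else
		dw = map | (dw & 0xFFFF0000);

	iwl_legacy_write_targ_mem(priv, addr, dw);
}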
520
521/*********************** END TX SCHEDULER *************************************/
522
523#endif /* __iwl_legacy_prph_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
new file mode 100644
index 00000000000..9b5d0abe8be
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-rx.c
@@ -0,0 +1,281 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <net/mac80211.h>
33#include <asm/unaligned.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/
41/*
42 * Rx theory of operation
43 *
44 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
45 * each of which point to Receive Buffers to be filled by the NIC. These get
46 * used not only for Rx frames, but for any command response or notification
47 * from the NIC. The driver and NIC manage the Rx buffers by means
48 * of indexes into the circular buffer.
49 *
50 * Rx Queue Indexes
51 * The host/firmware share two index registers for managing the Rx buffers.
52 *
53 * The READ index maps to the first position that the firmware may be writing
54 * to -- the driver can read up to (but not including) this position and get
55 * good data.
56 * The READ index is managed by the firmware once the card is enabled.
57 *
58 * The WRITE index maps to the last position the driver has read from -- the
59 * slot preceding WRITE is the last one where the firmware can place a packet.
60 *
61 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
62 * WRITE = READ.
63 *
64 * During initialization, the host sets up the READ queue position to the first
65 * INDEX position, and WRITE to the last (READ - 1 wrapped)
66 *
67 * When the firmware places a packet in a buffer, it will advance the READ index
68 * and fire the RX interrupt. The driver can then query the READ index and
69 * process as many packets as possible, moving the WRITE index forward as it
70 * resets the Rx queue buffers with new memory.
71 *
72 * The management in the driver is as follows:
73 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
74 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
75 * to replenish the iwl->rxq->rx_free.
76 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
77 * iwl->rxq is replenished and the READ INDEX is updated (updating the
78 * 'processed' and 'read' driver indexes as well)
79 * + A received packet is processed and handed to the kernel network stack,
80 * detached from the iwl->rxq. The driver 'processed' index is updated.
81 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
82 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
83 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
84 * were enough free buffers and RX_STALLED is set it is cleared.
85 *
86 *
87 * Driver sequence:
88 *
89 * iwl_legacy_rx_queue_alloc() Allocates rx_free
90 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
91 * iwl_rx_queue_restock
92 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
93 * queue, updates firmware pointers, and updates
94 * the WRITE index. If insufficient rx_free buffers
95 * are available, schedules iwl_rx_replenish
96 *
97 * -- enable interrupts --
98 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
99 * READ INDEX, detaching the SKB from the pool.
100 * Moves the packet buffer from queue to rx_used.
101 * Calls iwl_rx_queue_restock to refill any empty
102 * slots.
103 * ...
104 *
105 */
106
107/**
108 * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
109 */
110int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
111{
112 int s = q->read - q->write;
113 if (s <= 0)
114 s += RX_QUEUE_SIZE;
115 /* keep some buffer to not confuse full and empty queue */
116 s -= 2;
117 if (s < 0)
118 s = 0;
119 return s;
120}
121EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
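/*
 * Worked example: with read == write this returns RX_QUEUE_SIZE - 2 free
 * slots; the two slots held back are what keep a completely full ring from
 * looking identical to a completely empty one.
 */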
122
123/**
124 * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
125 */
126void
127iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
128 struct iwl_rx_queue *q)
129{
130 unsigned long flags;
131 u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
132 u32 reg;
133
134 spin_lock_irqsave(&q->lock, flags);
135
136 if (q->need_update == 0)
137 goto exit_unlock;
138
139 /* If power-saving is in use, make sure device is awake */
140 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
141 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
142
143 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
144 IWL_DEBUG_INFO(priv,
145 "Rx queue requesting wakeup,"
146 " GP1 = 0x%x\n", reg);
147 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
148 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
149 goto exit_unlock;
150 }
151
152 q->write_actual = (q->write & ~0x7);
153 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
154 q->write_actual);
155
156 /* Else device is assumed to be awake */
157 } else {
158 /* Device expects a multiple of 8 */
159 q->write_actual = (q->write & ~0x7);
160 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
161 q->write_actual);
162 }
163
164 q->need_update = 0;
165
166 exit_unlock:
167 spin_unlock_irqrestore(&q->lock, flags);
168}
169EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
170
171int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
172{
173 struct iwl_rx_queue *rxq = &priv->rxq;
174 struct device *dev = &priv->pci_dev->dev;
175 int i;
176
177 spin_lock_init(&rxq->lock);
178 INIT_LIST_HEAD(&rxq->rx_free);
179 INIT_LIST_HEAD(&rxq->rx_used);
180
181 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
182 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
183 GFP_KERNEL);
184 if (!rxq->bd)
185 goto err_bd;
186
187 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
188 &rxq->rb_stts_dma, GFP_KERNEL);
189 if (!rxq->rb_stts)
190 goto err_rb;
191
192 /* Fill the rx_used queue with _all_ of the Rx buffers */
193 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
194 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
195
196 /* Set us so that we have processed and used all buffers, but have
197 * not restocked the Rx queue with fresh buffers */
198 rxq->read = rxq->write = 0;
199 rxq->write_actual = 0;
200 rxq->free_count = 0;
201 rxq->need_update = 0;
202 return 0;
203
204err_rb:
205 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
206 rxq->bd_dma);
207err_bd:
208 return -ENOMEM;
209}
210EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
211
212
213void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
214 struct iwl_rx_mem_buffer *rxb)
215{
216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
217 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
218
219 if (!report->state) {
220 IWL_DEBUG_11H(priv,
221 "Spectrum Measure Notification: Start\n");
222 return;
223 }
224
225 memcpy(&priv->measure_report, report, sizeof(*report));
226 priv->measurement_status |= MEASUREMENT_READY;
227}
228EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
229
230/*
231 * returns non-zero if packet should be dropped
232 */
233int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
234 struct ieee80211_hdr *hdr,
235 u32 decrypt_res,
236 struct ieee80211_rx_status *stats)
237{
238 u16 fc = le16_to_cpu(hdr->frame_control);
239
240 /*
241 * All contexts have the same setting here due to it being
242 * a module parameter, so OK to check any context.
243 */
244 if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
245 RXON_FILTER_DIS_DECRYPT_MSK)
246 return 0;
247
248 if (!(fc & IEEE80211_FCTL_PROTECTED))
249 return 0;
250
251 IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
252 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
253 case RX_RES_STATUS_SEC_TYPE_TKIP:
 254		/* The uCode got a bad phase 1 key, so it pushes the packet up;
 255		 * decryption will be done in SW. */
256 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
257 RX_RES_STATUS_BAD_KEY_TTAK)
258 break;
259
260 case RX_RES_STATUS_SEC_TYPE_WEP:
261 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
262 RX_RES_STATUS_BAD_ICV_MIC) {
 263			/* bad ICV: the packet is destroyed since the
 264			 * decryption is done in place, so drop it */
265 IWL_DEBUG_RX(priv, "Packet destroyed\n");
266 return -1;
267 }
268 case RX_RES_STATUS_SEC_TYPE_CCMP:
269 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
270 RX_RES_STATUS_DECRYPT_OK) {
 271			IWL_DEBUG_RX(priv, "hw decrypt successful\n");
272 stats->flag |= RX_FLAG_DECRYPTED;
273 }
274 break;
275
276 default:
277 break;
278 }
279 return 0;
280}
281EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
new file mode 100644
index 00000000000..a6b5222fc59
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-scan.c
@@ -0,0 +1,549 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39
40/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
41 * sending probe req. This should be set long enough to hear probe responses
42 * from more than one AP. */
43#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
44#define IWL_ACTIVE_DWELL_TIME_52 (20)
45
46#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
47#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
48
49/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
50 * Must be set longer than active dwell time.
51 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
52#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
53#define IWL_PASSIVE_DWELL_TIME_52 (10)
54#define IWL_PASSIVE_DWELL_BASE (100)
55#define IWL_CHANNEL_TUNE_TIME 5
56
57static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
58{
59 int ret;
60 struct iwl_rx_packet *pkt;
61 struct iwl_host_cmd cmd = {
62 .id = REPLY_SCAN_ABORT_CMD,
63 .flags = CMD_WANT_SKB,
64 };
65
66 /* Exit instantly with error when device is not ready
67 * to receive scan abort command or it does not perform
68 * hardware scan currently */
69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->status) ||
73 test_bit(STATUS_EXIT_PENDING, &priv->status))
74 return -EIO;
75
76 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
77 if (ret)
78 return ret;
79
80 pkt = (struct iwl_rx_packet *)cmd.reply_page;
81 if (pkt->u.status != CAN_ABORT_STATUS) {
82 /* The scan abort will return 1 for success or
83 * 2 for "failure". A failure condition can be
 84	 * due to simply not being in an active scan, which
 85	 * can occur if we send the scan abort before the
 86	 * microcode has notified us that a scan is
87 * completed. */
88 IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
89 ret = -EIO;
90 }
91
92 iwl_legacy_free_pages(priv, cmd.reply_page);
93 return ret;
94}
95
96static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
97{
98 /* check if scan was requested from mac80211 */
99 if (priv->scan_request) {
100 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
101 ieee80211_scan_completed(priv->hw, aborted);
102 }
103
104 priv->scan_vif = NULL;
105 priv->scan_request = NULL;
106}
107
108void iwl_legacy_force_scan_end(struct iwl_priv *priv)
109{
110 lockdep_assert_held(&priv->mutex);
111
112 if (!test_bit(STATUS_SCANNING, &priv->status)) {
113 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
114 return;
115 }
116
117 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
118 clear_bit(STATUS_SCANNING, &priv->status);
119 clear_bit(STATUS_SCAN_HW, &priv->status);
120 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
121 iwl_legacy_complete_scan(priv, true);
122}
123
124static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
125{
126 int ret;
127
128 lockdep_assert_held(&priv->mutex);
129
130 if (!test_bit(STATUS_SCANNING, &priv->status)) {
131 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
132 return;
133 }
134
135 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
136 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
137 return;
138 }
139
140 ret = iwl_legacy_send_scan_abort(priv);
141 if (ret) {
142 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
143 iwl_legacy_force_scan_end(priv);
144 } else
 145		IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
146}
147
148/**
 149 * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
150 */
151int iwl_legacy_scan_cancel(struct iwl_priv *priv)
152{
153 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
154 queue_work(priv->workqueue, &priv->abort_scan);
155 return 0;
156}
157EXPORT_SYMBOL(iwl_legacy_scan_cancel);
158
159/**
160 * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
161 * @ms: amount of time to wait (in milliseconds) for scan to abort
162 *
163 */
164int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
165{
166 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
167
168 lockdep_assert_held(&priv->mutex);
169
170 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
171
172 iwl_legacy_do_scan_abort(priv);
173
174 while (time_before_eq(jiffies, timeout)) {
175 if (!test_bit(STATUS_SCAN_HW, &priv->status))
176 break;
177 msleep(20);
178 }
179
180 return test_bit(STATUS_SCAN_HW, &priv->status);
181}
182EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
183
184/* Service response to REPLY_SCAN_CMD (0x80) */
185static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
186 struct iwl_rx_mem_buffer *rxb)
187{
188#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
189 struct iwl_rx_packet *pkt = rxb_addr(rxb);
190 struct iwl_scanreq_notification *notif =
191 (struct iwl_scanreq_notification *)pkt->u.raw;
192
193 IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
194#endif
195}
196
197/* Service SCAN_START_NOTIFICATION (0x82) */
198static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
199 struct iwl_rx_mem_buffer *rxb)
200{
201 struct iwl_rx_packet *pkt = rxb_addr(rxb);
202 struct iwl_scanstart_notification *notif =
203 (struct iwl_scanstart_notification *)pkt->u.raw;
204 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
205 IWL_DEBUG_SCAN(priv, "Scan start: "
206 "%d [802.11%s] "
207 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
208 notif->channel,
209 notif->band ? "bg" : "a",
210 le32_to_cpu(notif->tsf_high),
211 le32_to_cpu(notif->tsf_low),
212 notif->status, notif->beacon_timer);
213}
214
215/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
216static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
217 struct iwl_rx_mem_buffer *rxb)
218{
219#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
220 struct iwl_rx_packet *pkt = rxb_addr(rxb);
221 struct iwl_scanresults_notification *notif =
222 (struct iwl_scanresults_notification *)pkt->u.raw;
223
224 IWL_DEBUG_SCAN(priv, "Scan ch.res: "
225 "%d [802.11%s] "
226 "(TSF: 0x%08X:%08X) - %d "
227 "elapsed=%lu usec\n",
228 notif->channel,
229 notif->band ? "bg" : "a",
230 le32_to_cpu(notif->tsf_high),
231 le32_to_cpu(notif->tsf_low),
232 le32_to_cpu(notif->statistics[0]),
233 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
234#endif
235}
236
237/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
238static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
239 struct iwl_rx_mem_buffer *rxb)
240{
241
242#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
243 struct iwl_rx_packet *pkt = rxb_addr(rxb);
244 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
245#endif
246
247 IWL_DEBUG_SCAN(priv,
248 "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
249 scan_notif->scanned_channels,
250 scan_notif->tsf_low,
251 scan_notif->tsf_high, scan_notif->status);
252
253 /* The HW is no longer scanning */
254 clear_bit(STATUS_SCAN_HW, &priv->status);
255
256 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
257 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
258 jiffies_to_msecs(jiffies - priv->scan_start));
259
260 queue_work(priv->workqueue, &priv->scan_completed);
261}
262
263void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
264{
265 /* scan handlers */
266 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
267 priv->rx_handlers[SCAN_START_NOTIFICATION] =
268 iwl_legacy_rx_scan_start_notif;
269 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
270 iwl_legacy_rx_scan_results_notif;
271 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
272 iwl_legacy_rx_scan_complete_notif;
273}
274EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
275
276inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
277 enum ieee80211_band band,
278 u8 n_probes)
279{
280 if (band == IEEE80211_BAND_5GHZ)
281 return IWL_ACTIVE_DWELL_TIME_52 +
282 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
283 else
284 return IWL_ACTIVE_DWELL_TIME_24 +
285 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
286}
287EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
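
As a quick worked example of the arithmetic above (all times in msec, using the constants defined at the top of this file): a scan sending two probe requests per channel (n_probes = 2) dwells 30 + 3 * (2 + 1) = 39 ms on a 2.4 GHz channel and 20 + 2 * (2 + 1) = 26 ms on a 5 GHz channel.
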
288
289u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
290 enum ieee80211_band band,
291 struct ieee80211_vif *vif)
292{
293 struct iwl_rxon_context *ctx;
294 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
295 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
296 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
297
298 if (iwl_legacy_is_any_associated(priv)) {
299 /*
300 * If we're associated, we clamp the maximum passive
301 * dwell time to be 98% of the smallest beacon interval
302 * (minus 2 * channel tune time)
303 */
304 for_each_context(priv, ctx) {
305 u16 value;
306
307 if (!iwl_legacy_is_associated_ctx(ctx))
308 continue;
309 value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
310 if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
311 value = IWL_PASSIVE_DWELL_BASE;
312 value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
313 passive = min(value, passive);
314 }
315 }
316
317 return passive;
318}
319EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
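
A worked example of the clamping above: while associated to an AP whose beacon interval exceeds IWL_PASSIVE_DWELL_BASE (say 200, an illustrative value), the interval is first clamped down to 100 and then reduced to 100 * 98 / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 88, so the 2.4 GHz passive dwell drops from its default of 100 + 20 = 120 ms to 88 ms.
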
320
321void iwl_legacy_init_scan_params(struct iwl_priv *priv)
322{
323 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
324 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
325 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
326 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
327 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
328}
329EXPORT_SYMBOL(iwl_legacy_init_scan_params);
330
331static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
332 struct ieee80211_vif *vif)
333{
334 int ret;
335
336 lockdep_assert_held(&priv->mutex);
337
338 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
339 return -EOPNOTSUPP;
340
341 cancel_delayed_work(&priv->scan_check);
342
343 if (!iwl_legacy_is_ready_rf(priv)) {
344 IWL_WARN(priv, "Request scan called when driver not ready.\n");
345 return -EIO;
346 }
347
348 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
349 IWL_DEBUG_SCAN(priv,
350 "Multiple concurrent scan requests in parallel.\n");
351 return -EBUSY;
352 }
353
354 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
355 IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
356 return -EBUSY;
357 }
358
359 IWL_DEBUG_SCAN(priv, "Starting scan...\n");
360
361 set_bit(STATUS_SCANNING, &priv->status);
362 priv->scan_start = jiffies;
363
364 ret = priv->cfg->ops->utils->request_scan(priv, vif);
365 if (ret) {
366 clear_bit(STATUS_SCANNING, &priv->status);
367 return ret;
368 }
369
370 queue_delayed_work(priv->workqueue, &priv->scan_check,
371 IWL_SCAN_CHECK_WATCHDOG);
372
373 return 0;
374}
375
376int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
377 struct ieee80211_vif *vif,
378 struct cfg80211_scan_request *req)
379{
380 struct iwl_priv *priv = hw->priv;
381 int ret;
382
383 IWL_DEBUG_MAC80211(priv, "enter\n");
384
385 if (req->n_channels == 0)
386 return -EINVAL;
387
388 mutex_lock(&priv->mutex);
389
390 if (test_bit(STATUS_SCANNING, &priv->status)) {
391 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
392 ret = -EAGAIN;
393 goto out_unlock;
394 }
395
396 /* mac80211 will only ask for one band at a time */
397 priv->scan_request = req;
398 priv->scan_vif = vif;
399 priv->scan_band = req->channels[0]->band;
400
401 ret = iwl_legacy_scan_initiate(priv, vif);
402
403 IWL_DEBUG_MAC80211(priv, "leave\n");
404
405out_unlock:
406 mutex_unlock(&priv->mutex);
407
408 return ret;
409}
410EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
411
412static void iwl_legacy_bg_scan_check(struct work_struct *data)
413{
414 struct iwl_priv *priv =
415 container_of(data, struct iwl_priv, scan_check.work);
416
417 IWL_DEBUG_SCAN(priv, "Scan check work\n");
418
 419	/* Since we got here, the firmware has not finished the scan and
 420	 * is most likely in bad shape, so we don't bother to send an
 421	 * abort command; just force scan complete to mac80211 */
422 mutex_lock(&priv->mutex);
423 iwl_legacy_force_scan_end(priv);
424 mutex_unlock(&priv->mutex);
425}
426
427/**
428 * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
429 */
430
431u16
432iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
433 const u8 *ta, const u8 *ies, int ie_len, int left)
434{
435 int len = 0;
436 u8 *pos = NULL;
437
438 /* Make sure there is enough space for the probe request,
439 * two mandatory IEs and the data */
440 left -= 24;
441 if (left < 0)
442 return 0;
443
444 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
445 memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
446 memcpy(frame->sa, ta, ETH_ALEN);
447 memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
448 frame->seq_ctrl = 0;
449
450 len += 24;
451
452 /* ...next IE... */
453 pos = &frame->u.probe_req.variable[0];
454
455 /* fill in our indirect SSID IE */
456 left -= 2;
457 if (left < 0)
458 return 0;
459 *pos++ = WLAN_EID_SSID;
460 *pos++ = 0;
461
462 len += 2;
463
464 if (WARN_ON(left < ie_len))
465 return len;
466
467 if (ies && ie_len) {
468 memcpy(pos, ies, ie_len);
469 len += ie_len;
470 }
471
472 return (u16)len;
473}
474EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
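
A worked example of the length bookkeeping above (values illustrative): called with ie_len = 10 and left = 40, the function writes the 24-byte management header, the 2-byte zero-length (broadcast) SSID IE and the 10 supplied IE bytes, and returns 36; with left below 26 it returns 0 before any IEs are written.
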
475
476static void iwl_legacy_bg_abort_scan(struct work_struct *work)
477{
478 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
479
480 IWL_DEBUG_SCAN(priv, "Abort scan work\n");
481
 482	/* We keep the scan_check work queued in case the firmware fails to
 483	 * report back the scan completed notification */
484 mutex_lock(&priv->mutex);
485 iwl_legacy_scan_cancel_timeout(priv, 200);
486 mutex_unlock(&priv->mutex);
487}
488
489static void iwl_legacy_bg_scan_completed(struct work_struct *work)
490{
491 struct iwl_priv *priv =
492 container_of(work, struct iwl_priv, scan_completed);
493 bool aborted;
494
495 IWL_DEBUG_SCAN(priv, "Completed scan.\n");
496
497 cancel_delayed_work(&priv->scan_check);
498
499 mutex_lock(&priv->mutex);
500
501 aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
502 if (aborted)
503 IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
504
505 if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
506 IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
507 goto out_settings;
508 }
509
510 iwl_legacy_complete_scan(priv, aborted);
511
512out_settings:
513 /* Can we still talk to firmware ? */
514 if (!iwl_legacy_is_ready_rf(priv))
515 goto out;
516
517 /*
518 * We do not commit power settings while scan is pending,
519 * do it now if the settings changed.
520 */
521 iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
522 iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
523
524 priv->cfg->ops->utils->post_scan(priv);
525
526out:
527 mutex_unlock(&priv->mutex);
528}
529
530void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
531{
532 INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
533 INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
534 INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
535}
536EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
537
538void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
539{
540 cancel_work_sync(&priv->abort_scan);
541 cancel_work_sync(&priv->scan_completed);
542
543 if (cancel_delayed_work_sync(&priv->scan_check)) {
544 mutex_lock(&priv->mutex);
545 iwl_legacy_force_scan_end(priv);
546 mutex_unlock(&priv->mutex);
547 }
548}
549EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
new file mode 100644
index 00000000000..66f0fb2bbe0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -0,0 +1,816 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/lockdep.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38
39/* priv->sta_lock must be held */
40static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
41{
42
43 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
44 IWL_ERR(priv,
45 "ACTIVATE a non DRIVER active station id %u addr %pM\n",
46 sta_id, priv->stations[sta_id].sta.sta.addr);
47
48 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
49 IWL_DEBUG_ASSOC(priv,
50 "STA id %u addr %pM already present"
51 " in uCode (according to driver)\n",
52 sta_id, priv->stations[sta_id].sta.sta.addr);
53 } else {
54 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
55 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
56 sta_id, priv->stations[sta_id].sta.sta.addr);
57 }
58}
59
60static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
61 struct iwl_legacy_addsta_cmd *addsta,
62 struct iwl_rx_packet *pkt,
63 bool sync)
64{
65 u8 sta_id = addsta->sta.sta_id;
66 unsigned long flags;
67 int ret = -EIO;
68
69 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
70 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
71 pkt->hdr.flags);
72 return ret;
73 }
74
75 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
76 sta_id);
77
78 spin_lock_irqsave(&priv->sta_lock, flags);
79
80 switch (pkt->u.add_sta.status) {
81 case ADD_STA_SUCCESS_MSK:
82 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
83 iwl_legacy_sta_ucode_activate(priv, sta_id);
84 ret = 0;
85 break;
86 case ADD_STA_NO_ROOM_IN_TABLE:
87 IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
88 sta_id);
89 break;
90 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
91 IWL_ERR(priv,
92 "Adding station %d failed, no block ack resource.\n",
93 sta_id);
94 break;
95 case ADD_STA_MODIFY_NON_EXIST_STA:
96 IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
97 sta_id);
98 break;
99 default:
100 IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
101 pkt->u.add_sta.status);
102 break;
103 }
104
105 IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
106 priv->stations[sta_id].sta.mode ==
107 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
108 sta_id, priv->stations[sta_id].sta.sta.addr);
109
110 /*
111 * XXX: The MAC address in the command buffer is often changed from
112 * the original sent to the device. That is, the MAC address
113 * written to the command buffer often is not the same MAC address
114 * read from the command buffer when the command returns. This
115 * issue has not yet been resolved and this debugging is left to
116 * observe the problem.
117 */
118 IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
119 priv->stations[sta_id].sta.mode ==
120 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
121 addsta->sta.addr);
122 spin_unlock_irqrestore(&priv->sta_lock, flags);
123
124 return ret;
125}
126
127static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
128 struct iwl_device_cmd *cmd,
129 struct iwl_rx_packet *pkt)
130{
131 struct iwl_legacy_addsta_cmd *addsta =
132 (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
133
134 iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
135
136}
137
138int iwl_legacy_send_add_sta(struct iwl_priv *priv,
139 struct iwl_legacy_addsta_cmd *sta, u8 flags)
140{
141 struct iwl_rx_packet *pkt = NULL;
142 int ret = 0;
143 u8 data[sizeof(*sta)];
144 struct iwl_host_cmd cmd = {
145 .id = REPLY_ADD_STA,
146 .flags = flags,
147 .data = data,
148 };
149 u8 sta_id __maybe_unused = sta->sta.sta_id;
150
151 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
152 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
153
154 if (flags & CMD_ASYNC)
155 cmd.callback = iwl_legacy_add_sta_callback;
156 else {
157 cmd.flags |= CMD_WANT_SKB;
158 might_sleep();
159 }
160
161 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
162 ret = iwl_legacy_send_cmd(priv, &cmd);
163
164 if (ret || (flags & CMD_ASYNC))
165 return ret;
166
167 if (ret == 0) {
168 pkt = (struct iwl_rx_packet *)cmd.reply_page;
169 ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
170 }
171 iwl_legacy_free_pages(priv, cmd.reply_page);
172
173 return ret;
174}
175EXPORT_SYMBOL(iwl_legacy_send_add_sta);
176
177static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
178 struct ieee80211_sta *sta,
179 struct iwl_rxon_context *ctx)
180{
181 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
182 __le32 sta_flags;
183 u8 mimo_ps_mode;
184
185 if (!sta || !sta_ht_inf->ht_supported)
186 goto done;
187
188 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
189 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
190 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
191 "static" :
192 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
193 "dynamic" : "disabled");
194
195 sta_flags = priv->stations[index].sta.station_flags;
196
197 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
198
199 switch (mimo_ps_mode) {
200 case WLAN_HT_CAP_SM_PS_STATIC:
201 sta_flags |= STA_FLG_MIMO_DIS_MSK;
202 break;
203 case WLAN_HT_CAP_SM_PS_DYNAMIC:
204 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
205 break;
206 case WLAN_HT_CAP_SM_PS_DISABLED:
207 break;
208 default:
209 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
210 break;
211 }
212
213 sta_flags |= cpu_to_le32(
214 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
215
216 sta_flags |= cpu_to_le32(
217 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
218
219 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
220 sta_flags |= STA_FLG_HT40_EN_MSK;
221 else
222 sta_flags &= ~STA_FLG_HT40_EN_MSK;
223
224 priv->stations[index].sta.station_flags = sta_flags;
225 done:
226 return;
227}
228
229/**
230 * iwl_legacy_prep_station - Prepare station information for addition
231 *
232 * should be called with sta_lock held
233 */
234u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
235 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
236{
237 struct iwl_station_entry *station;
238 int i;
239 u8 sta_id = IWL_INVALID_STATION;
240 u16 rate;
241
242 if (is_ap)
243 sta_id = ctx->ap_sta_id;
244 else if (is_broadcast_ether_addr(addr))
245 sta_id = ctx->bcast_sta_id;
246 else
247 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
248 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
249 addr)) {
250 sta_id = i;
251 break;
252 }
253
254 if (!priv->stations[i].used &&
255 sta_id == IWL_INVALID_STATION)
256 sta_id = i;
257 }
258
259 /*
260 * These two conditions have the same outcome, but keep them
261 * separate
262 */
263 if (unlikely(sta_id == IWL_INVALID_STATION))
264 return sta_id;
265
266 /*
267 * uCode is not able to deal with multiple requests to add a
268 * station. Keep track if one is in progress so that we do not send
269 * another.
270 */
271 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
272 IWL_DEBUG_INFO(priv,
273 "STA %d already in process of being added.\n",
274 sta_id);
275 return sta_id;
276 }
277
278 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
279 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
280 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
281 IWL_DEBUG_ASSOC(priv,
282 "STA %d (%pM) already added, not adding again.\n",
283 sta_id, addr);
284 return sta_id;
285 }
286
287 station = &priv->stations[sta_id];
288 station->used = IWL_STA_DRIVER_ACTIVE;
289 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
290 sta_id, addr);
291 priv->num_stations++;
292
293 /* Set up the REPLY_ADD_STA command to send to device */
294 memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
295 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
296 station->sta.mode = 0;
297 station->sta.sta.sta_id = sta_id;
298 station->sta.station_flags = ctx->station_flags;
299 station->ctxid = ctx->ctxid;
300
301 if (sta) {
302 struct iwl_station_priv_common *sta_priv;
303
304 sta_priv = (void *)sta->drv_priv;
305 sta_priv->ctx = ctx;
306 }
307
308 /*
309 * OK to call unconditionally, since local stations (IBSS BSSID
310 * STA and broadcast STA) pass in a NULL sta, and mac80211
311 * doesn't allow HT IBSS.
312 */
313 iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
314
315 /* 3945 only */
316 rate = (priv->band == IEEE80211_BAND_5GHZ) ?
317 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
318 /* Turn on both antennas for the station... */
319 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
320
321 return sta_id;
322
323}
324EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
325
326#define STA_WAIT_TIMEOUT (HZ/2)
327
328/**
 329 * iwl_legacy_add_station_common - Add a station to the driver and device tables
330 */
331int
332iwl_legacy_add_station_common(struct iwl_priv *priv,
333 struct iwl_rxon_context *ctx,
334 const u8 *addr, bool is_ap,
335 struct ieee80211_sta *sta, u8 *sta_id_r)
336{
337 unsigned long flags_spin;
338 int ret = 0;
339 u8 sta_id;
340 struct iwl_legacy_addsta_cmd sta_cmd;
341
342 *sta_id_r = 0;
343 spin_lock_irqsave(&priv->sta_lock, flags_spin);
344 sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
345 if (sta_id == IWL_INVALID_STATION) {
346 IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
347 addr);
348 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
349 return -EINVAL;
350 }
351
352 /*
353 * uCode is not able to deal with multiple requests to add a
354 * station. Keep track if one is in progress so that we do not send
355 * another.
356 */
357 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
358 IWL_DEBUG_INFO(priv,
359 "STA %d already in process of being added.\n",
360 sta_id);
361 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
362 return -EEXIST;
363 }
364
365 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
366 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
367 IWL_DEBUG_ASSOC(priv,
368 "STA %d (%pM) already added, not adding again.\n",
369 sta_id, addr);
370 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
371 return -EEXIST;
372 }
373
374 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
375 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
376 sizeof(struct iwl_legacy_addsta_cmd));
377 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
378
379 /* Add station to device's station table */
380 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
381 if (ret) {
382 spin_lock_irqsave(&priv->sta_lock, flags_spin);
383 IWL_ERR(priv, "Adding station %pM failed.\n",
384 priv->stations[sta_id].sta.sta.addr);
385 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
386 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
387 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
388 }
389 *sta_id_r = sta_id;
390 return ret;
391}
392EXPORT_SYMBOL(iwl_legacy_add_station_common);
393
394/**
395 * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
396 *
397 * priv->sta_lock must be held
398 */
399static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
400{
 401	/* uCode entry must be active and driver entry must be inactive */
402 if ((priv->stations[sta_id].used &
403 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
404 IWL_STA_UCODE_ACTIVE)
405 IWL_ERR(priv, "removed non active STA %u\n", sta_id);
406
407 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
408
409 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
410 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
411}
412
413static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
414 const u8 *addr, int sta_id,
415 bool temporary)
416{
417 struct iwl_rx_packet *pkt;
418 int ret;
419
420 unsigned long flags_spin;
421 struct iwl_rem_sta_cmd rm_sta_cmd;
422
423 struct iwl_host_cmd cmd = {
424 .id = REPLY_REMOVE_STA,
425 .len = sizeof(struct iwl_rem_sta_cmd),
426 .flags = CMD_SYNC,
427 .data = &rm_sta_cmd,
428 };
429
430 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
431 rm_sta_cmd.num_sta = 1;
432 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
433
434 cmd.flags |= CMD_WANT_SKB;
435
436 ret = iwl_legacy_send_cmd(priv, &cmd);
437
438 if (ret)
439 return ret;
440
441 pkt = (struct iwl_rx_packet *)cmd.reply_page;
442 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
443 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
444 pkt->hdr.flags);
445 ret = -EIO;
446 }
447
448 if (!ret) {
449 switch (pkt->u.rem_sta.status) {
450 case REM_STA_SUCCESS_MSK:
451 if (!temporary) {
452 spin_lock_irqsave(&priv->sta_lock, flags_spin);
453 iwl_legacy_sta_ucode_deactivate(priv, sta_id);
454 spin_unlock_irqrestore(&priv->sta_lock,
455 flags_spin);
456 }
457 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
458 break;
459 default:
460 ret = -EIO;
461 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
462 break;
463 }
464 }
465 iwl_legacy_free_pages(priv, cmd.reply_page);
466
467 return ret;
468}
469
470/**
471 * iwl_legacy_remove_station - Remove driver's knowledge of station.
472 */
473int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
474 const u8 *addr)
475{
476 unsigned long flags;
477
478 if (!iwl_legacy_is_ready(priv)) {
479 IWL_DEBUG_INFO(priv,
480 "Unable to remove station %pM, device not ready.\n",
481 addr);
482 /*
483 * It is typical for stations to be removed when we are
484 * going down. Return success since device will be down
485 * soon anyway
486 */
487 return 0;
488 }
489
490 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
491 sta_id, addr);
492
493 if (WARN_ON(sta_id == IWL_INVALID_STATION))
494 return -EINVAL;
495
496 spin_lock_irqsave(&priv->sta_lock, flags);
497
498 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
499 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
500 addr);
501 goto out_err;
502 }
503
504 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
505 IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
506 addr);
507 goto out_err;
508 }
509
510 if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
511 kfree(priv->stations[sta_id].lq);
512 priv->stations[sta_id].lq = NULL;
513 }
514
515 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
516
517 priv->num_stations--;
518
519 BUG_ON(priv->num_stations < 0);
520
521 spin_unlock_irqrestore(&priv->sta_lock, flags);
522
523 return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
524out_err:
525 spin_unlock_irqrestore(&priv->sta_lock, flags);
526 return -EINVAL;
527}
528EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
529
530/**
531 * iwl_legacy_clear_ucode_stations - clear ucode station table bits
532 *
533 * This function clears all the bits in the driver indicating
534 * which stations are active in the ucode. Call when something
535 * other than explicit station management would cause this in
536 * the ucode, e.g. unassociated RXON.
537 */
538void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
539 struct iwl_rxon_context *ctx)
540{
541 int i;
542 unsigned long flags_spin;
543 bool cleared = false;
544
545 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
546
547 spin_lock_irqsave(&priv->sta_lock, flags_spin);
548 for (i = 0; i < priv->hw_params.max_stations; i++) {
549 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
550 continue;
551
552 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
553 IWL_DEBUG_INFO(priv,
554 "Clearing ucode active for station %d\n", i);
555 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
556 cleared = true;
557 }
558 }
559 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
560
561 if (!cleared)
562 IWL_DEBUG_INFO(priv,
563 "No active stations found to be cleared\n");
564}
565EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
566
567/**
568 * iwl_legacy_restore_stations() - Restore driver known stations to device
569 *
 570 * All stations considered active by the driver, but not present in uCode,
 571 * are restored.
572 *
573 * Function sleeps.
574 */
575void
576iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
577{
578 struct iwl_legacy_addsta_cmd sta_cmd;
579 struct iwl_link_quality_cmd lq;
580 unsigned long flags_spin;
581 int i;
582 bool found = false;
583 int ret;
584 bool send_lq;
585
586 if (!iwl_legacy_is_ready(priv)) {
587 IWL_DEBUG_INFO(priv,
588 "Not ready yet, not restoring any stations.\n");
589 return;
590 }
591
592 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
593 spin_lock_irqsave(&priv->sta_lock, flags_spin);
594 for (i = 0; i < priv->hw_params.max_stations; i++) {
595 if (ctx->ctxid != priv->stations[i].ctxid)
596 continue;
597 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
598 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
599 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
600 priv->stations[i].sta.sta.addr);
601 priv->stations[i].sta.mode = 0;
602 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
603 found = true;
604 }
605 }
606
607 for (i = 0; i < priv->hw_params.max_stations; i++) {
608 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
609 memcpy(&sta_cmd, &priv->stations[i].sta,
610 sizeof(struct iwl_legacy_addsta_cmd));
611 send_lq = false;
612 if (priv->stations[i].lq) {
613 memcpy(&lq, priv->stations[i].lq,
614 sizeof(struct iwl_link_quality_cmd));
615 send_lq = true;
616 }
617 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
618 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
619 if (ret) {
620 spin_lock_irqsave(&priv->sta_lock, flags_spin);
621 IWL_ERR(priv, "Adding station %pM failed.\n",
622 priv->stations[i].sta.sta.addr);
623 priv->stations[i].used &=
624 ~IWL_STA_DRIVER_ACTIVE;
625 priv->stations[i].used &=
626 ~IWL_STA_UCODE_INPROGRESS;
627 spin_unlock_irqrestore(&priv->sta_lock,
628 flags_spin);
629 }
630 /*
631 * Rate scaling has already been initialized, send
632 * current LQ command
633 */
634 if (send_lq)
635 iwl_legacy_send_lq_cmd(priv, ctx, &lq,
636 CMD_SYNC, true);
637 spin_lock_irqsave(&priv->sta_lock, flags_spin);
638 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
639 }
640 }
641
642 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
643 if (!found)
644 IWL_DEBUG_INFO(priv, "Restoring all known stations"
645 " .... no stations to be restored.\n");
646 else
647 IWL_DEBUG_INFO(priv, "Restoring all known stations"
648 " .... complete.\n");
649}
650EXPORT_SYMBOL(iwl_legacy_restore_stations);
651
652int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
653{
654 int i;
655
656 for (i = 0; i < priv->sta_key_max_num; i++)
657 if (!test_and_set_bit(i, &priv->ucode_key_table))
658 return i;
659
660 return WEP_INVALID_OFFSET;
661}
662EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
663
664void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
665{
666 unsigned long flags;
667 int i;
668
669 spin_lock_irqsave(&priv->sta_lock, flags);
670 for (i = 0; i < priv->hw_params.max_stations; i++) {
671 if (!(priv->stations[i].used & IWL_STA_BCAST))
672 continue;
673
674 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
675 priv->num_stations--;
676 BUG_ON(priv->num_stations < 0);
677 kfree(priv->stations[i].lq);
678 priv->stations[i].lq = NULL;
679 }
680 spin_unlock_irqrestore(&priv->sta_lock, flags);
681}
682EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
683
684#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
685static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
686 struct iwl_link_quality_cmd *lq)
687{
688 int i;
689 IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
690 IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
691 lq->general_params.single_stream_ant_msk,
692 lq->general_params.dual_stream_ant_msk);
693
694 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
695 IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
696 i, lq->rs_table[i].rate_n_flags);
697}
698#else
699static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
700 struct iwl_link_quality_cmd *lq)
701{
702}
703#endif
704
705/**
706 * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
707 *
 708 * It sometimes happens that an HT rate has been in use and we then
 709 * lose connectivity with the AP; mac80211 will first tell us that the
 710 * current channel is not HT anymore before removing the station. In such a
711 * scenario the RXON flags will be updated to indicate we are not
712 * communicating HT anymore, but the LQ command may still contain HT rates.
713 * Test for this to prevent driver from sending LQ command between the time
714 * RXON flags are updated and when LQ command is updated.
715 */
716static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
717 struct iwl_rxon_context *ctx,
718 struct iwl_link_quality_cmd *lq)
719{
720 int i;
721
722 if (ctx->ht.enabled)
723 return true;
724
725 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
726 ctx->active.channel);
727 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
728 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
729 RATE_MCS_HT_MSK) {
730 IWL_DEBUG_INFO(priv,
731 "index %d of LQ expects HT channel\n",
732 i);
733 return false;
734 }
735 }
736 return true;
737}
738
739/**
740 * iwl_legacy_send_lq_cmd() - Send link quality command
741 * @init: This command is sent as part of station initialization right
742 * after station has been added.
743 *
744 * The link quality command is sent as the last step of station creation.
 745 * This is the special case in which init is set; once the command
 746 * completes we clear the state indicating that station creation is in
 747 * progress.
748 */
749int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
750 struct iwl_link_quality_cmd *lq, u8 flags, bool init)
751{
752 int ret = 0;
753 unsigned long flags_spin;
754
755 struct iwl_host_cmd cmd = {
756 .id = REPLY_TX_LINK_QUALITY_CMD,
757 .len = sizeof(struct iwl_link_quality_cmd),
758 .flags = flags,
759 .data = lq,
760 };
761
762 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
763 return -EINVAL;
764
765
766 spin_lock_irqsave(&priv->sta_lock, flags_spin);
767 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
768 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
769 return -EINVAL;
770 }
771 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
772
773 iwl_legacy_dump_lq_cmd(priv, lq);
774 BUG_ON(init && (cmd.flags & CMD_ASYNC));
775
776 if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
777 ret = iwl_legacy_send_cmd(priv, &cmd);
778 else
779 ret = -EINVAL;
780
781 if (cmd.flags & CMD_ASYNC)
782 return ret;
783
784 if (init) {
785 IWL_DEBUG_INFO(priv, "init LQ command complete,"
786 " clearing sta addition status for sta %d\n",
787 lq->sta_id);
788 spin_lock_irqsave(&priv->sta_lock, flags_spin);
789 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
790 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
791 }
792 return ret;
793}
794EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
795
796int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
797 struct ieee80211_vif *vif,
798 struct ieee80211_sta *sta)
799{
800 struct iwl_priv *priv = hw->priv;
801 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
802 int ret;
803
804 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
805 sta->addr);
806 mutex_lock(&priv->mutex);
807 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
808 sta->addr);
809 ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
810 if (ret)
811 IWL_ERR(priv, "Error removing station %pM\n",
812 sta->addr);
813 mutex_unlock(&priv->mutex);
814 return ret;
815}
816EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
new file mode 100644
index 00000000000..67bd75fe01a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.h
@@ -0,0 +1,148 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_legacy_sta_h__
30#define __iwl_legacy_sta_h__
31
32#include "iwl-dev.h"
33
34#define HW_KEY_DYNAMIC 0
35#define HW_KEY_DEFAULT 1
36
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
40 being activated */
41#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
42 (this is for the IBSS BSSID stations) */
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44
45
46void iwl_legacy_restore_stations(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx);
48void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
49 struct iwl_rxon_context *ctx);
50void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
51int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
52int iwl_legacy_send_add_sta(struct iwl_priv *priv,
53 struct iwl_legacy_addsta_cmd *sta, u8 flags);
54int iwl_legacy_add_station_common(struct iwl_priv *priv,
55 struct iwl_rxon_context *ctx,
56 const u8 *addr, bool is_ap,
57 struct ieee80211_sta *sta, u8 *sta_id_r);
58int iwl_legacy_remove_station(struct iwl_priv *priv,
59 const u8 sta_id,
60 const u8 *addr);
61int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
62 struct ieee80211_vif *vif,
63 struct ieee80211_sta *sta);
64
65u8 iwl_legacy_prep_station(struct iwl_priv *priv,
66 struct iwl_rxon_context *ctx,
67 const u8 *addr, bool is_ap,
68 struct ieee80211_sta *sta);
69
70int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
71 struct iwl_rxon_context *ctx,
72 struct iwl_link_quality_cmd *lq,
73 u8 flags, bool init);
74
75/**
76 * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
77 * @priv: iwl priv struct
78 *
79 * This is called during iwl_down() to make sure that in the case
80 * we're coming there from a hardware restart mac80211 will be
81 * able to reconfigure stations -- if we're getting there in the
82 * normal down flow then the stations will already be cleared.
83 */
84static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
85{
86 unsigned long flags;
87 struct iwl_rxon_context *ctx;
88
89 spin_lock_irqsave(&priv->sta_lock, flags);
90 memset(priv->stations, 0, sizeof(priv->stations));
91 priv->num_stations = 0;
92
93 priv->ucode_key_table = 0;
94
95 for_each_context(priv, ctx) {
96 /*
97 * Remove all key information that is not stored as part
98 * of station information since mac80211 may not have had
99 * a chance to remove all the keys. When device is
100 * reconfigured by mac80211 after an error all keys will
101 * be reconfigured.
102 */
103 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
104 ctx->key_mapping_keys = 0;
105 }
106
107 spin_unlock_irqrestore(&priv->sta_lock, flags);
108}
109
110static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
111{
112 if (WARN_ON(!sta))
113 return IWL_INVALID_STATION;
114
115 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
116}
117
118/**
119 * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
120 * @priv: iwl priv
121 * @context: the current context
122 * @sta: mac80211 station
123 *
124 * In certain circumstances mac80211 passes a station pointer
125 * that may be %NULL, for example during TX or key setup. In
126 * that case, we need to use the broadcast station, so this
127 * inline wraps that pattern.
128 */
129static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
130 struct iwl_rxon_context *context,
131 struct ieee80211_sta *sta)
132{
133 int sta_id;
134
135 if (!sta)
136 return context->bcast_sta_id;
137
138 sta_id = iwl_legacy_sta_id(sta);
139
140 /*
141 * mac80211 should not be passing a partially
142 * initialised station!
143 */
144 WARN_ON(sta_id == IWL_INVALID_STATION);
145
146 return sta_id;
147}
148#endif /* __iwl_legacy_sta_h__ */
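
A minimal, hypothetical caller sketch, not part of the driver, showing the pattern the helper above is meant for: on a TX-style path mac80211 may hand us a NULL station, in which case the context's broadcast station is used. It assumes only the declarations from the headers shown here.

#include "iwl-dev.h"
#include "iwl-sta.h"

/* Hypothetical helper, for illustration only: resolve the station table
 * index for an outgoing frame, falling back to the context's broadcast
 * station when mac80211 supplied no station pointer. */
static int sketch_tx_sta_id(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_sta *sta)
{
	return iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
}
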
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
new file mode 100644
index 00000000000..ef9e268bf8a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -0,0 +1,658 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <net/mac80211.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40
41/**
42 * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
43 */
44void
45iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
46{
47 u32 reg = 0;
48 int txq_id = txq->q.id;
49
50 if (txq->need_update == 0)
51 return;
52
53 /* if we're trying to save power */
54 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
55 /* wake up nic if it's powered down ...
56 * uCode will wake up, and interrupt us again, so next
57 * time we'll skip this part. */
58 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
59
60 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
61 IWL_DEBUG_INFO(priv,
62 "Tx queue %d requesting wakeup,"
63 " GP1 = 0x%x\n", txq_id, reg);
64 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
65 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
66 return;
67 }
68
69 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
70 txq->q.write_ptr | (txq_id << 8));
71
72 /*
73 * else not in power-save mode,
74 * uCode will never sleep when we're
75 * trying to tx (during RFKILL, we're not trying to tx).
76 */
77 } else
78 iwl_write32(priv, HBUS_TARG_WRPTR,
79 txq->q.write_ptr | (txq_id << 8));
80 txq->need_update = 0;
81}
82EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
83
84/**
85 * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
86 */
87void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
88{
89 struct iwl_tx_queue *txq = &priv->txq[txq_id];
90 struct iwl_queue *q = &txq->q;
91
92 if (q->n_bd == 0)
93 return;
94
95 while (q->write_ptr != q->read_ptr) {
96 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
97 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
98 }
99}
100EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
101
102/**
103 * iwl_legacy_tx_queue_free - Deallocate DMA queue.
104 * @txq: Transmit queue to deallocate.
105 *
106 * Empty queue by removing and destroying all BD's.
107 * Free all buffers.
108 * 0-fill, but do not free "txq" descriptor structure.
109 */
110void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
111{
112 struct iwl_tx_queue *txq = &priv->txq[txq_id];
113 struct device *dev = &priv->pci_dev->dev;
114 int i;
115
116 iwl_legacy_tx_queue_unmap(priv, txq_id);
117
118 /* De-alloc array of command/tx buffers */
119 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
120 kfree(txq->cmd[i]);
121
122 /* De-alloc circular buffer of TFDs */
123 if (txq->q.n_bd)
124 dma_free_coherent(dev, priv->hw_params.tfd_size *
125 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
126
127 /* De-alloc array of per-TFD driver data */
128 kfree(txq->txb);
129 txq->txb = NULL;
130
131 /* deallocate arrays */
132 kfree(txq->cmd);
133 kfree(txq->meta);
134 txq->cmd = NULL;
135 txq->meta = NULL;
136
137 /* 0-fill queue descriptor structure */
138 memset(txq, 0, sizeof(*txq));
139}
140EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
141
142/**
143 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
144 */
145void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
146{
147 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
148 struct iwl_queue *q = &txq->q;
149 int i;
150
151 if (q->n_bd == 0)
152 return;
153
154 while (q->read_ptr != q->write_ptr) {
155 i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
156
157 if (txq->meta[i].flags & CMD_MAPPED) {
158 pci_unmap_single(priv->pci_dev,
159 dma_unmap_addr(&txq->meta[i], mapping),
160 dma_unmap_len(&txq->meta[i], len),
161 PCI_DMA_BIDIRECTIONAL);
162 txq->meta[i].flags = 0;
163 }
164
165 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
166 }
167
168 i = q->n_window;
169 if (txq->meta[i].flags & CMD_MAPPED) {
170 pci_unmap_single(priv->pci_dev,
171 dma_unmap_addr(&txq->meta[i], mapping),
172 dma_unmap_len(&txq->meta[i], len),
173 PCI_DMA_BIDIRECTIONAL);
174 txq->meta[i].flags = 0;
175 }
176}
177EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
178
179/**
180 * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
181 * @priv: device private data; frees the host command queue (priv->txq[priv->cmd_queue]).
182 *
183 * Empty queue by removing and destroying all BD's.
184 * Free all buffers.
185 * 0-fill, but do not free "txq" descriptor structure.
186 */
187void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
188{
189 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
190 struct device *dev = &priv->pci_dev->dev;
191 int i;
192
193 iwl_legacy_cmd_queue_unmap(priv);
194
195 /* De-alloc array of command/tx buffers */
196 for (i = 0; i <= TFD_CMD_SLOTS; i++)
197 kfree(txq->cmd[i]);
198
199 /* De-alloc circular buffer of TFDs */
200 if (txq->q.n_bd)
201 dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
202 txq->tfds, txq->q.dma_addr);
203
204 /* deallocate arrays */
205 kfree(txq->cmd);
206 kfree(txq->meta);
207 txq->cmd = NULL;
208 txq->meta = NULL;
209
210 /* 0-fill queue descriptor structure */
211 memset(txq, 0, sizeof(*txq));
212}
213EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
214
215/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
216 * DMA services
217 *
218 * Theory of operation
219 *
220 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
221 * of buffer descriptors, each of which points to one or more data buffers for
222 * the device to read from or fill. Driver and device exchange status of each
223 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
224 * entries in each circular buffer, to protect against confusing empty and full
225 * queue states.
226 *
227 * The device reads or writes the data in the queues via the device's several
228 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
229 *
230 * For Tx queues, there are low mark and high mark limits. If, after queuing
231 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
232 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
233 * > high mark, the Tx queue is resumed.
234 *
235 * See more detailed info in iwl-4965-hw.h.
236 ***************************************************/
237
238int iwl_legacy_queue_space(const struct iwl_queue *q)
239{
240 int s = q->read_ptr - q->write_ptr;
241
242 if (q->read_ptr > q->write_ptr)
243 s -= q->n_bd;
244
245 if (s <= 0)
246 s += q->n_window;
247 /* keep some reserve to not confuse empty and full situations */
248 s -= 2;
249 if (s < 0)
250 s = 0;
251 return s;
252}
253EXPORT_SYMBOL(iwl_legacy_queue_space);
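/*
 * Worked example for iwl_legacy_queue_space() above (illustrative only;
 * the values are assumed, not taken from a real trace).  For a Tx queue
 * where n_bd == n_window == TFD_QUEUE_SIZE_MAX (assumed 256 here):
 *
 *   read_ptr = 10, write_ptr = 250
 *   s = 10 - 250 = -240     (read_ptr < write_ptr, so no n_bd correction)
 *   s <= 0, so s += 256     ->  16   (16 slots physically unused)
 *   s -= 2                  ->  14   (2-entry reserve keeps empty != full)
 *
 * so the function reports 14 usable slots.
 */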
254
255
256/**
257 * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
258 */
259static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
260 int count, int slots_num, u32 id)
261{
262 q->n_bd = count;
263 q->n_window = slots_num;
264 q->id = id;
265
266 /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
267 * and iwl_legacy_queue_dec_wrap are broken. */
268 BUG_ON(!is_power_of_2(count));
269
270 /* slots_num must be power-of-two size, otherwise
271 * iwl_legacy_get_cmd_index is broken. */
272 BUG_ON(!is_power_of_2(slots_num));
273
274 q->low_mark = q->n_window / 4;
275 if (q->low_mark < 4)
276 q->low_mark = 4;
277
278 q->high_mark = q->n_window / 8;
279 if (q->high_mark < 2)
280 q->high_mark = 2;
281
282 q->write_ptr = q->read_ptr = 0;
283
284 return 0;
285}
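/*
 * Worked example for iwl_legacy_queue_init() above (illustrative; assumes
 * slots_num of 256 for a data queue and 32 for the command queue):
 *
 *   slots_num = 256:  low_mark = 256/4 = 64,  high_mark = 256/8 = 32
 *   slots_num = 32:   low_mark = 32/4  = 8,   high_mark = 32/8  = 4
 *
 * Results below the floors of 4 (low) and 2 (high) would be rounded up
 * to those floors.
 */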
286
287/**
288 * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
289 */
290static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
291 struct iwl_tx_queue *txq, u32 id)
292{
293 struct device *dev = &priv->pci_dev->dev;
294 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
295
296 /* Driver private data, only for Tx (not command) queues,
297 * not shared with device. */
298 if (id != priv->cmd_queue) {
299 txq->txb = kzalloc(sizeof(txq->txb[0]) *
300 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
301 if (!txq->txb) {
302 IWL_ERR(priv, "kmalloc for auxiliary BD "
303 "structures failed\n");
304 goto error;
305 }
306 } else {
307 txq->txb = NULL;
308 }
309
310 /* Circular buffer of transmit frame descriptors (TFDs),
311 * shared with device */
312 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
313 GFP_KERNEL);
314 if (!txq->tfds) {
315 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
316 goto error;
317 }
318 txq->q.id = id;
319
320 return 0;
321
322 error:
323 kfree(txq->txb);
324 txq->txb = NULL;
325
326 return -ENOMEM;
327}
328
329/**
330 * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
331 */
332int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
333 int slots_num, u32 txq_id)
334{
335 int i, len;
336 int ret;
337 int actual_slots = slots_num;
338
339 /*
340 * Alloc buffer array for commands (Tx or other types of commands).
341 * For the command queue (#4/#9), allocate command space + one big
342 * command for scan, since the scan command is very large; the system will
343 * not have two scans at the same time, so only one is needed.
344 * For normal Tx queues (all other queues), no super-size command
345 * space is needed.
346 */
347 if (txq_id == priv->cmd_queue)
348 actual_slots++;
349
350 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
351 GFP_KERNEL);
352 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
353 GFP_KERNEL);
354
355 if (!txq->meta || !txq->cmd)
356 goto out_free_arrays;
357
358 len = sizeof(struct iwl_device_cmd);
359 for (i = 0; i < actual_slots; i++) {
360 /* only happens for cmd queue */
361 if (i == slots_num)
362 len = IWL_MAX_CMD_SIZE;
363
364 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
365 if (!txq->cmd[i])
366 goto err;
367 }
368
369 /* Alloc driver data array and TFD circular buffer */
370 ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
371 if (ret)
372 goto err;
373
374 txq->need_update = 0;
375
376 /*
377 * For the default queues 0-3, set up the swq_id
378 * already -- all others need to get one later
379 * (if they need one at all).
380 */
381 if (txq_id < 4)
382 iwl_legacy_set_swq_id(txq, txq_id, txq_id);
383
384 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
385 * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
386 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
387
388 /* Initialize queue's high/low-water marks, and head/tail indexes */
389 iwl_legacy_queue_init(priv, &txq->q,
390 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
391
392 /* Tell device where to find queue */
393 priv->cfg->ops->lib->txq_init(priv, txq);
394
395 return 0;
396err:
397 for (i = 0; i < actual_slots; i++)
398 kfree(txq->cmd[i]);
399out_free_arrays:
400 kfree(txq->meta);
401 kfree(txq->cmd);
402
403 return -ENOMEM;
404}
405EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
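/*
 * Illustrative sketch (not part of the original file): how a caller can
 * bring up every Tx/cmd queue with iwl_legacy_tx_queue_init().  The helper
 * name below is hypothetical; the loop shape and the slot constants follow
 * the way the chip-specific setup code typically drives this function.
 */
static int __maybe_unused iwl_legacy_txq_setup_sketch(struct iwl_priv *priv)
{
	int txq_id, slots_num, ret;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		/* The command queue gets TFD_CMD_SLOTS (plus one huge slot
		 * added internally); data queues get TFD_TX_CMD_SLOTS. */
		slots_num = (txq_id == priv->cmd_queue) ?
				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
					       slots_num, txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx queue %d init failed\n", txq_id);
			return ret;
		}
	}
	return 0;
}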
406
407void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
408 int slots_num, u32 txq_id)
409{
410 int actual_slots = slots_num;
411
412 if (txq_id == priv->cmd_queue)
413 actual_slots++;
414
415 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
416
417 txq->need_update = 0;
418
419 /* Initialize queue's high/low-water marks, and head/tail indexes */
420 iwl_legacy_queue_init(priv, &txq->q,
421 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
422
423 /* Tell device where to find queue */
424 priv->cfg->ops->lib->txq_init(priv, txq);
425}
426EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
427
428/*************** HOST COMMAND QUEUE FUNCTIONS *****/
429
430/**
431 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
432 * @priv: device private data pointer
433 * @cmd: a pointer to the uCode command structure
434 *
435 * The function returns a value < 0 to indicate that the operation
436 * failed. On success, it returns the index (>= 0) of the command in
437 * the command queue.
438 */
439int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
440{
441 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
442 struct iwl_queue *q = &txq->q;
443 struct iwl_device_cmd *out_cmd;
444 struct iwl_cmd_meta *out_meta;
445 dma_addr_t phys_addr;
446 unsigned long flags;
447 int len;
448 u32 idx;
449 u16 fix_size;
450
451 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
452 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
453
454 /* If any of the command structures ends up being larger than
455 * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
456 * we will need to increase the size of the TFD entries.
457 * Also check that the command buffer does not exceed the size
458 * of device_cmd and max_cmd_size. */
459 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
460 !(cmd->flags & CMD_SIZE_HUGE));
461 BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
462
463 if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
464 IWL_WARN(priv, "Not sending command - %s KILL\n",
465 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
466 return -EIO;
467 }
468
469 spin_lock_irqsave(&priv->hcmd_lock, flags);
470
471 if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
472 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
473
474 IWL_ERR(priv, "Restarting adapter due to command queue full\n");
475 queue_work(priv->workqueue, &priv->restart);
476 return -ENOSPC;
477 }
478
479 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
480 out_cmd = txq->cmd[idx];
481 out_meta = &txq->meta[idx];
482
483 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
484 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
485 return -ENOSPC;
486 }
487
488 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
489 out_meta->flags = cmd->flags | CMD_MAPPED;
490 if (cmd->flags & CMD_WANT_SKB)
491 out_meta->source = cmd;
492 if (cmd->flags & CMD_ASYNC)
493 out_meta->callback = cmd->callback;
494
495 out_cmd->hdr.cmd = cmd->id;
496 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
497
498 /* At this point, the out_cmd now has all of the incoming cmd
499 * information */
500
501 out_cmd->hdr.flags = 0;
502 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
503 INDEX_TO_SEQ(q->write_ptr));
504 if (cmd->flags & CMD_SIZE_HUGE)
505 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
506 len = sizeof(struct iwl_device_cmd);
507 if (idx == TFD_CMD_SLOTS)
508 len = IWL_MAX_CMD_SIZE;
509
510#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
511 switch (out_cmd->hdr.cmd) {
512 case REPLY_TX_LINK_QUALITY_CMD:
513 case SENSITIVITY_CMD:
514 IWL_DEBUG_HC_DUMP(priv,
515 "Sending command %s (#%x), seq: 0x%04X, "
516 "%d bytes at %d[%d]:%d\n",
517 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
518 out_cmd->hdr.cmd,
519 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
520 q->write_ptr, idx, priv->cmd_queue);
521 break;
522 default:
523 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
524 "%d bytes at %d[%d]:%d\n",
525 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
526 out_cmd->hdr.cmd,
527 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
528 q->write_ptr, idx, priv->cmd_queue);
529 }
530#endif
531 txq->need_update = 1;
532
533 if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
534 /* Set up entry in queue's byte count circular buffer */
535 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
536
537 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
538 fix_size, PCI_DMA_BIDIRECTIONAL);
539 dma_unmap_addr_set(out_meta, mapping, phys_addr);
540 dma_unmap_len_set(out_meta, len, fix_size);
541
542 trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
543 fix_size, cmd->flags);
544
545 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
546 phys_addr, fix_size, 1,
547 U32_PAD(cmd->len));
548
549 /* Increment and update queue's write index */
550 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
551 iwl_legacy_txq_update_write_ptr(priv, txq);
552
553 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
554 return idx;
555}
556
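/*
 * Illustrative sketch (not part of the original file): a host command
 * normally reaches iwl_legacy_enqueue_hcmd() through the send_cmd wrappers
 * rather than directly, with a struct iwl_host_cmd describing the payload
 * (compare iwl3945_get_measurement() later in this series).  The command ID
 * and payload struct below are chosen purely for illustration.
 */
static int __maybe_unused iwl_legacy_send_cmd_sketch(struct iwl_priv *priv)
{
	struct iwl_powertable_cmd powertable = { .flags = 0 };
	struct iwl_host_cmd cmd = {
		.id = POWER_TABLE_CMD,
		.len = sizeof(powertable),
		.data = &powertable,
		/* no CMD_WANT_SKB: we do not need the reply page back */
	};

	/* Blocks until the command completes (or the call times out). */
	return iwl_legacy_send_cmd_sync(priv, &cmd);
}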
557/**
558 * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
559 *
560 * When FW advances 'R' index, all entries between old and new 'R' index
561 * need to be reclaimed. As a result, some free space forms. If there is
562 * enough free space (> low mark), wake the stack that feeds us.
563 */
564static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
565 int idx, int cmd_idx)
566{
567 struct iwl_tx_queue *txq = &priv->txq[txq_id];
568 struct iwl_queue *q = &txq->q;
569 int nfreed = 0;
570
571 if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
572 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
573 "is out of range [0-%d] %d %d.\n", txq_id,
574 idx, q->n_bd, q->write_ptr, q->read_ptr);
575 return;
576 }
577
578 for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
579 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
580
581 if (nfreed++ > 0) {
582 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
583 q->write_ptr, q->read_ptr);
584 queue_work(priv->workqueue, &priv->restart);
585 }
586
587 }
588}
589
590/**
591 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
592 * @rxb: Rx buffer to reclaim
593 *
594 * If an Rx buffer has an async callback associated with it, the callback
595 * will be executed. The attached skb (if present) will only be freed
596 * if the callback returns 1.
597 */
598void
599iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
600{
601 struct iwl_rx_packet *pkt = rxb_addr(rxb);
602 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
603 int txq_id = SEQ_TO_QUEUE(sequence);
604 int index = SEQ_TO_INDEX(sequence);
605 int cmd_index;
606 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
607 struct iwl_device_cmd *cmd;
608 struct iwl_cmd_meta *meta;
609 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
610 unsigned long flags;
611
612 /* If a Tx command is being handled and it isn't in the actual
613 * command queue then a command routing bug has been introduced
614 * in the queue management code. */
615 if (WARN(txq_id != priv->cmd_queue,
616 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
617 txq_id, priv->cmd_queue, sequence,
618 priv->txq[priv->cmd_queue].q.read_ptr,
619 priv->txq[priv->cmd_queue].q.write_ptr)) {
620 iwl_print_hex_error(priv, pkt, 32);
621 return;
622 }
623
624 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
625 cmd = txq->cmd[cmd_index];
626 meta = &txq->meta[cmd_index];
627
628 txq->time_stamp = jiffies;
629
630 pci_unmap_single(priv->pci_dev,
631 dma_unmap_addr(meta, mapping),
632 dma_unmap_len(meta, len),
633 PCI_DMA_BIDIRECTIONAL);
634
635 /* Input error checking is done when commands are added to queue. */
636 if (meta->flags & CMD_WANT_SKB) {
637 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
638 rxb->page = NULL;
639 } else if (meta->callback)
640 meta->callback(priv, cmd, pkt);
641
642 spin_lock_irqsave(&priv->hcmd_lock, flags);
643
644 iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
645
646 if (!(meta->flags & CMD_ASYNC)) {
647 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
648 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
649 iwl_legacy_get_cmd_string(cmd->hdr.cmd));
650 wake_up(&priv->wait_command_queue);
651 }
652
653 /* Mark as unmapped */
654 meta->flags = 0;
655
656 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
657}
658EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
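/*
 * Worked example of the sequence-field round trip used above (illustrative;
 * assumes the usual iwlegacy QUEUE_TO_SEQ()/INDEX_TO_SEQ() layout with the
 * queue number in bits 8..12 and the TFD index in bits 0..7):
 *
 *   enqueue on command queue 4 at write_ptr 7:
 *       sequence = QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(7) = 0x0407
 *   on completion:
 *       SEQ_TO_QUEUE(0x0407) = 4,  SEQ_TO_INDEX(0x0407) = 7
 *
 * A huge command additionally carries the SEQ_HUGE_FRAME bit, which is what
 * lets iwl_legacy_get_cmd_index() map it to the extra command slot.
 */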
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
new file mode 100644
index 00000000000..66ee15629a7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -0,0 +1,4017 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/wireless.h>
44#include <linux/firmware.h>
45#include <linux/etherdevice.h>
46#include <linux/if_arp.h>
47
48#include <net/ieee80211_radiotap.h>
49#include <net/mac80211.h>
50
51#include <asm/div64.h>
52
53#define DRV_NAME "iwl3945"
54
55#include "iwl-fh.h"
56#include "iwl-3945-fh.h"
57#include "iwl-commands.h"
58#include "iwl-sta.h"
59#include "iwl-3945.h"
60#include "iwl-core.h"
61#include "iwl-helpers.h"
62#include "iwl-dev.h"
63#include "iwl-spectrum.h"
64
65/*
66 * module name, copyright, version, etc.
67 */
68
69#define DRV_DESCRIPTION \
70"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
71
72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
73#define VD "d"
74#else
75#define VD
76#endif
77
78/*
79 * add "s" to indicate spectrum measurement included.
80 * we add it here to be consistent with previous releases in which
81 * this was configurable.
82 */
83#define DRV_VERSION IWLWIFI_VERSION VD "s"
84#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
85#define DRV_AUTHOR "<ilw@linux.intel.com>"
86
87MODULE_DESCRIPTION(DRV_DESCRIPTION);
88MODULE_VERSION(DRV_VERSION);
89MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
90MODULE_LICENSE("GPL");
91
92 /* module parameters */
93struct iwl_mod_params iwl3945_mod_params = {
94 .sw_crypto = 1,
95 .restart_fw = 1,
96 .disable_hw_scan = 1,
97 /* the rest are 0 by default */
98};
99
100/**
101 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
102 * @priv: eeprom and antenna fields are used to determine antenna flags
103 *
104 * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
105 * iwl3945_mod_params.antenna specifies the antenna diversity mode:
106 *
107 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
108 * IWL_ANTENNA_MAIN - Force MAIN antenna
109 * IWL_ANTENNA_AUX - Force AUX antenna
110 */
111__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
112{
113 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
114
115 switch (iwl3945_mod_params.antenna) {
116 case IWL_ANTENNA_DIVERSITY:
117 return 0;
118
119 case IWL_ANTENNA_MAIN:
120 if (eeprom->antenna_switch_type)
121 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
122 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
123
124 case IWL_ANTENNA_AUX:
125 if (eeprom->antenna_switch_type)
126 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
127 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
128 }
129
130 /* bad antenna selector value */
131 IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
132 iwl3945_mod_params.antenna);
133
134 return 0; /* "diversity" is default if error */
135}
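/*
 * Illustrative sketch (not part of the original file): the returned antenna
 * flags are intended to be OR'd into the staging RXON flags before the RXON
 * is committed.  The helper name is hypothetical; the context and field
 * names match those used elsewhere in this file.
 */
static void __maybe_unused iwl3945_apply_antenna_sketch(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	/* Clear any previous diversity/antenna selection, then reapply. */
	ctx->staging.flags &= ~(RXON_FLG_DIS_DIV_MSK |
				RXON_FLG_ANT_A_MSK | RXON_FLG_ANT_B_MSK);
	ctx->staging.flags |= iwl3945_get_antenna_flags(priv);
}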
136
137static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
138 struct ieee80211_key_conf *keyconf,
139 u8 sta_id)
140{
141 unsigned long flags;
142 __le16 key_flags = 0;
143 int ret;
144
145 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
146 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
147
148 if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
149 key_flags |= STA_KEY_MULTICAST_MSK;
150
151 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
152 keyconf->hw_key_idx = keyconf->keyidx;
153 key_flags &= ~STA_KEY_FLG_INVALID;
154
155 spin_lock_irqsave(&priv->sta_lock, flags);
156 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
157 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
158 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
159 keyconf->keylen);
160
161 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
162 keyconf->keylen);
163
164 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
165 == STA_KEY_FLG_NO_ENC)
166 priv->stations[sta_id].sta.key.key_offset =
167 iwl_legacy_get_free_ucode_key_index(priv);
168 /* else, we are overriding an existing key => no need to allocate room
169 * in uCode. */
170
171 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
172 "no space for a new key");
173
174 priv->stations[sta_id].sta.key.key_flags = key_flags;
175 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
176 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
177
178 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
179
180 ret = iwl_legacy_send_add_sta(priv,
181 &priv->stations[sta_id].sta, CMD_ASYNC);
182
183 spin_unlock_irqrestore(&priv->sta_lock, flags);
184
185 return ret;
186}
187
188static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
189 struct ieee80211_key_conf *keyconf,
190 u8 sta_id)
191{
192 return -EOPNOTSUPP;
193}
194
195static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
196 struct ieee80211_key_conf *keyconf,
197 u8 sta_id)
198{
199 return -EOPNOTSUPP;
200}
201
202static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
203{
204 unsigned long flags;
205 struct iwl_legacy_addsta_cmd sta_cmd;
206
207 spin_lock_irqsave(&priv->sta_lock, flags);
208 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
209 memset(&priv->stations[sta_id].sta.key, 0,
210 sizeof(struct iwl4965_keyinfo));
211 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
212 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
213 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
214 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
215 spin_unlock_irqrestore(&priv->sta_lock, flags);
216
217 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
218 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
219}
220
221static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
222 struct ieee80211_key_conf *keyconf, u8 sta_id)
223{
224 int ret = 0;
225
226 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
227
228 switch (keyconf->cipher) {
229 case WLAN_CIPHER_SUITE_CCMP:
230 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
231 break;
232 case WLAN_CIPHER_SUITE_TKIP:
233 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
234 break;
235 case WLAN_CIPHER_SUITE_WEP40:
236 case WLAN_CIPHER_SUITE_WEP104:
237 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
238 break;
239 default:
240 IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
241 keyconf->cipher);
242 ret = -EINVAL;
243 }
244
245 IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
246 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
247 sta_id, ret);
248
249 return ret;
250}
251
252static int iwl3945_remove_static_key(struct iwl_priv *priv)
253{
254 int ret = -EOPNOTSUPP;
255
256 return ret;
257}
258
259static int iwl3945_set_static_key(struct iwl_priv *priv,
260 struct ieee80211_key_conf *key)
261{
262 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
263 key->cipher == WLAN_CIPHER_SUITE_WEP104)
264 return -EOPNOTSUPP;
265
266 IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
267 return -EINVAL;
268}
269
270static void iwl3945_clear_free_frames(struct iwl_priv *priv)
271{
272 struct list_head *element;
273
274 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
275 priv->frames_count);
276
277 while (!list_empty(&priv->free_frames)) {
278 element = priv->free_frames.next;
279 list_del(element);
280 kfree(list_entry(element, struct iwl3945_frame, list));
281 priv->frames_count--;
282 }
283
284 if (priv->frames_count) {
285 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
286 priv->frames_count);
287 priv->frames_count = 0;
288 }
289}
290
291static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
292{
293 struct iwl3945_frame *frame;
294 struct list_head *element;
295 if (list_empty(&priv->free_frames)) {
296 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
297 if (!frame) {
298 IWL_ERR(priv, "Could not allocate frame!\n");
299 return NULL;
300 }
301
302 priv->frames_count++;
303 return frame;
304 }
305
306 element = priv->free_frames.next;
307 list_del(element);
308 return list_entry(element, struct iwl3945_frame, list);
309}
310
311static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
312{
313 memset(frame, 0, sizeof(*frame));
314 list_add(&frame->list, &priv->free_frames);
315}
316
317unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
318 struct ieee80211_hdr *hdr,
319 int left)
320{
321
322 if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
323 return 0;
324
325 if (priv->beacon_skb->len > left)
326 return 0;
327
328 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
329
330 return priv->beacon_skb->len;
331}
332
333static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
334{
335 struct iwl3945_frame *frame;
336 unsigned int frame_size;
337 int rc;
338 u8 rate;
339
340 frame = iwl3945_get_free_frame(priv);
341
342 if (!frame) {
343 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
344 "command.\n");
345 return -ENOMEM;
346 }
347
348 rate = iwl_legacy_get_lowest_plcp(priv,
349 &priv->contexts[IWL_RXON_CTX_BSS]);
350
351 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
352
353 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
354 &frame->u.cmd[0]);
355
356 iwl3945_free_frame(priv, frame);
357
358 return rc;
359}
360
361static void iwl3945_unset_hw_params(struct iwl_priv *priv)
362{
363 if (priv->_3945.shared_virt)
364 dma_free_coherent(&priv->pci_dev->dev,
365 sizeof(struct iwl3945_shared),
366 priv->_3945.shared_virt,
367 priv->_3945.shared_phys);
368}
369
370static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
371 struct ieee80211_tx_info *info,
372 struct iwl_device_cmd *cmd,
373 struct sk_buff *skb_frag,
374 int sta_id)
375{
376 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
377 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
378
379 tx_cmd->sec_ctl = 0;
380
381 switch (keyinfo->cipher) {
382 case WLAN_CIPHER_SUITE_CCMP:
383 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
384 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
385 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
386 break;
387
388 case WLAN_CIPHER_SUITE_TKIP:
389 break;
390
391 case WLAN_CIPHER_SUITE_WEP104:
392 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
393 /* fall through */
394 case WLAN_CIPHER_SUITE_WEP40:
395 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
396 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
397
398 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
399
400 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
401 "with key %d\n", info->control.hw_key->hw_key_idx);
402 break;
403
404 default:
405 IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
406 break;
407 }
408}
409
410/*
411 * Build the basic part of the REPLY_TX command.
412 */
413static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
414 struct iwl_device_cmd *cmd,
415 struct ieee80211_tx_info *info,
416 struct ieee80211_hdr *hdr, u8 std_id)
417{
418 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
419 __le32 tx_flags = tx_cmd->tx_flags;
420 __le16 fc = hdr->frame_control;
421
422 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
423 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
424 tx_flags |= TX_CMD_FLG_ACK_MSK;
425 if (ieee80211_is_mgmt(fc))
426 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
427 if (ieee80211_is_probe_resp(fc) &&
428 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
429 tx_flags |= TX_CMD_FLG_TSF_MSK;
430 } else {
431 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
432 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
433 }
434
435 tx_cmd->sta_id = std_id;
436 if (ieee80211_has_morefrags(fc))
437 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
438
439 if (ieee80211_is_data_qos(fc)) {
440 u8 *qc = ieee80211_get_qos_ctl(hdr);
441 tx_cmd->tid_tspec = qc[0] & 0xf;
442 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
443 } else {
444 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
445 }
446
447 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
448
449 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
450 if (ieee80211_is_mgmt(fc)) {
451 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
452 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
453 else
454 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
455 } else {
456 tx_cmd->timeout.pm_frame_timeout = 0;
457 }
458
459 tx_cmd->driver_txop = 0;
460 tx_cmd->tx_flags = tx_flags;
461 tx_cmd->next_frame_len = 0;
462}
463
464/*
465 * start REPLY_TX command process
466 */
467static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
468{
469 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
470 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
471 struct iwl3945_tx_cmd *tx_cmd;
472 struct iwl_tx_queue *txq = NULL;
473 struct iwl_queue *q = NULL;
474 struct iwl_device_cmd *out_cmd;
475 struct iwl_cmd_meta *out_meta;
476 dma_addr_t phys_addr;
477 dma_addr_t txcmd_phys;
478 int txq_id = skb_get_queue_mapping(skb);
479 u16 len, idx, hdr_len;
480 u8 id;
481 u8 unicast;
482 u8 sta_id;
483 u8 tid = 0;
484 __le16 fc;
485 u8 wait_write_ptr = 0;
486 unsigned long flags;
487
488 spin_lock_irqsave(&priv->lock, flags);
489 if (iwl_legacy_is_rfkill(priv)) {
490 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
491 goto drop_unlock;
492 }
493
494 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
495 IWL_ERR(priv, "ERROR: No TX rate available.\n");
496 goto drop_unlock;
497 }
498
499 unicast = !is_multicast_ether_addr(hdr->addr1);
500 id = 0;
501
502 fc = hdr->frame_control;
503
504#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
505 if (ieee80211_is_auth(fc))
506 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
507 else if (ieee80211_is_assoc_req(fc))
508 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
509 else if (ieee80211_is_reassoc_req(fc))
510 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
511#endif
512
513 spin_unlock_irqrestore(&priv->lock, flags);
514
515 hdr_len = ieee80211_hdrlen(fc);
516
517 /* Find index into station table for destination station */
518 sta_id = iwl_legacy_sta_id_or_broadcast(
519 priv, &priv->contexts[IWL_RXON_CTX_BSS],
520 info->control.sta);
521 if (sta_id == IWL_INVALID_STATION) {
522 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
523 hdr->addr1);
524 goto drop;
525 }
526
527 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
528
529 if (ieee80211_is_data_qos(fc)) {
530 u8 *qc = ieee80211_get_qos_ctl(hdr);
531 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
532 if (unlikely(tid >= MAX_TID_COUNT))
533 goto drop;
534 }
535
536 /* Descriptor for chosen Tx queue */
537 txq = &priv->txq[txq_id];
538 q = &txq->q;
539
540 if ((iwl_legacy_queue_space(q) < q->high_mark))
541 goto drop;
542
543 spin_lock_irqsave(&priv->lock, flags);
544
545 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
546
547 /* Set up driver data for this TFD */
548 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
549 txq->txb[q->write_ptr].skb = skb;
550 txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
551
552 /* Init first empty entry in queue's array of Tx/cmd buffers */
553 out_cmd = txq->cmd[idx];
554 out_meta = &txq->meta[idx];
555 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
556 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
557 memset(tx_cmd, 0, sizeof(*tx_cmd));
558
559 /*
560 * Set up the Tx-command (not MAC!) header.
561 * Store the chosen Tx queue and TFD index within the sequence field;
562 * after Tx, uCode's Tx response will return this value so driver can
563 * locate the frame within the tx queue and do post-tx processing.
564 */
565 out_cmd->hdr.cmd = REPLY_TX;
566 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
567 INDEX_TO_SEQ(q->write_ptr)));
568
569 /* Copy MAC header from skb into command buffer */
570 memcpy(tx_cmd->hdr, hdr, hdr_len);
571
572
573 if (info->control.hw_key)
574 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
575
576 /* TODO need this for burst mode later on */
577 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
578
579 /* set is_hcca to 0; it probably will never be implemented */
580 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
581
582 /* Total # bytes to be transmitted */
583 len = (u16)skb->len;
584 tx_cmd->len = cpu_to_le16(len);
585
586 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
587 iwl_legacy_update_stats(priv, true, fc, len);
588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
589 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
590
591 if (!ieee80211_has_morefrags(hdr->frame_control)) {
592 txq->need_update = 1;
593 } else {
594 wait_write_ptr = 1;
595 txq->need_update = 0;
596 }
597
598 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
599 le16_to_cpu(out_cmd->hdr.sequence));
600 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
601 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
602 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
603 ieee80211_hdrlen(fc));
604
605 /*
606 * Use the first empty entry in this queue's command buffer array
607 * to contain the Tx command and MAC header concatenated together
608 * (payload data will be in another buffer).
609 * Size of this varies, due to varying MAC header length.
610 * If end is not dword aligned, we'll have 2 extra bytes at the end
611 * of the MAC header (device reads on dword boundaries).
612 * We'll tell device about this padding later.
613 */
614 len = sizeof(struct iwl3945_tx_cmd) +
615 sizeof(struct iwl_cmd_header) + hdr_len;
616 len = (len + 3) & ~3;
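	/* Example (illustrative values): a 61-byte cmd+header total rounds
	 * up to the next dword boundary, 64; an already aligned 60 stays 60. */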
617
618 /* Physical address of this Tx command's header (not MAC header!),
619 * within command buffer array. */
620 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
621 len, PCI_DMA_TODEVICE);
622 /* we do not map meta data ... so we can safely access the address to
623 * provide to the unmap command */
624 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
625 dma_unmap_len_set(out_meta, len, len);
626
627 /* Add buffer containing Tx command and MAC(!) header to TFD's
628 * first entry */
629 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
630 txcmd_phys, len, 1, 0);
631
632
633 /* Set up TFD's 2nd entry to point directly to remainder of skb,
634 * if any (802.11 null frames have no payload). */
635 len = skb->len - hdr_len;
636 if (len) {
637 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
638 len, PCI_DMA_TODEVICE);
639 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
640 phys_addr, len,
641 0, U32_PAD(len));
642 }
643
644
645 /* Tell device the write index *just past* this latest filled TFD */
646 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
647 iwl_legacy_txq_update_write_ptr(priv, txq);
648 spin_unlock_irqrestore(&priv->lock, flags);
649
650 if ((iwl_legacy_queue_space(q) < q->high_mark)
651 && priv->mac80211_registered) {
652 if (wait_write_ptr) {
653 spin_lock_irqsave(&priv->lock, flags);
654 txq->need_update = 1;
655 iwl_legacy_txq_update_write_ptr(priv, txq);
656 spin_unlock_irqrestore(&priv->lock, flags);
657 }
658
659 iwl_legacy_stop_queue(priv, txq);
660 }
661
662 return 0;
663
664drop_unlock:
665 spin_unlock_irqrestore(&priv->lock, flags);
666drop:
667 return -1;
668}
669
670static int iwl3945_get_measurement(struct iwl_priv *priv,
671 struct ieee80211_measurement_params *params,
672 u8 type)
673{
674 struct iwl_spectrum_cmd spectrum;
675 struct iwl_rx_packet *pkt;
676 struct iwl_host_cmd cmd = {
677 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
678 .data = (void *)&spectrum,
679 .flags = CMD_WANT_SKB,
680 };
681 u32 add_time = le64_to_cpu(params->start_time);
682 int rc;
683 int spectrum_resp_status;
684 int duration = le16_to_cpu(params->duration);
685 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
686
687 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
688 add_time = iwl_legacy_usecs_to_beacons(priv,
689 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
690 le16_to_cpu(ctx->timing.beacon_interval));
691
692 memset(&spectrum, 0, sizeof(spectrum));
693
694 spectrum.channel_count = cpu_to_le16(1);
695 spectrum.flags =
696 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
697 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
698 cmd.len = sizeof(spectrum);
699 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
700
701 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
702 spectrum.start_time =
703 iwl_legacy_add_beacon_time(priv,
704 priv->_3945.last_beacon_time, add_time,
705 le16_to_cpu(ctx->timing.beacon_interval));
706 else
707 spectrum.start_time = 0;
708
709 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
710 spectrum.channels[0].channel = params->channel;
711 spectrum.channels[0].type = type;
712 if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
713 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
714 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
715
716 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
717 if (rc)
718 return rc;
719
720 pkt = (struct iwl_rx_packet *)cmd.reply_page;
721 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
722 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
723 rc = -EIO;
724 }
725
726 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
727 switch (spectrum_resp_status) {
728 case 0: /* Command will be handled */
729 if (pkt->u.spectrum.id != 0xff) {
730 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
731 pkt->u.spectrum.id);
732 priv->measurement_status &= ~MEASUREMENT_READY;
733 }
734 priv->measurement_status |= MEASUREMENT_ACTIVE;
735 rc = 0;
736 break;
737
738 case 1: /* Command will not be handled */
739 rc = -EAGAIN;
740 break;
741 }
742
743 iwl_legacy_free_pages(priv, cmd.reply_page);
744
745 return rc;
746}
747
748static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
749 struct iwl_rx_mem_buffer *rxb)
750{
751 struct iwl_rx_packet *pkt = rxb_addr(rxb);
752 struct iwl_alive_resp *palive;
753 struct delayed_work *pwork;
754
755 palive = &pkt->u.alive_frame;
756
757 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
758 "0x%01X 0x%01X\n",
759 palive->is_valid, palive->ver_type,
760 palive->ver_subtype);
761
762 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
763 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
764 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
765 sizeof(struct iwl_alive_resp));
766 pwork = &priv->init_alive_start;
767 } else {
768 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
769 memcpy(&priv->card_alive, &pkt->u.alive_frame,
770 sizeof(struct iwl_alive_resp));
771 pwork = &priv->alive_start;
772 iwl3945_disable_events(priv);
773 }
774
775 /* We delay the ALIVE response by 5ms to
776 * give the HW RF Kill time to activate... */
777 if (palive->is_valid == UCODE_VALID_OK)
778 queue_delayed_work(priv->workqueue, pwork,
779 msecs_to_jiffies(5));
780 else
781 IWL_WARN(priv, "uCode did not respond OK.\n");
782}
783
784static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
785 struct iwl_rx_mem_buffer *rxb)
786{
787#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
788 struct iwl_rx_packet *pkt = rxb_addr(rxb);
789#endif
790
791 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
792}
793
794static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
795 struct iwl_rx_mem_buffer *rxb)
796{
797 struct iwl_rx_packet *pkt = rxb_addr(rxb);
798 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
799#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
800 u8 rate = beacon->beacon_notify_hdr.rate;
801
802 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
803 "tsf %d %d rate %d\n",
804 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
805 beacon->beacon_notify_hdr.failure_frame,
806 le32_to_cpu(beacon->ibss_mgr_status),
807 le32_to_cpu(beacon->high_tsf),
808 le32_to_cpu(beacon->low_tsf), rate);
809#endif
810
811 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
812
813}
814
815/* Handle notification from uCode that card's power state is changing
816 * due to software, hardware, or critical temperature RFKILL */
817static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
818 struct iwl_rx_mem_buffer *rxb)
819{
820 struct iwl_rx_packet *pkt = rxb_addr(rxb);
821 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
822 unsigned long status = priv->status;
823
824 IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
825 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
826 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
827
828 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
829 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
830
831 if (flags & HW_CARD_DISABLED)
832 set_bit(STATUS_RF_KILL_HW, &priv->status);
833 else
834 clear_bit(STATUS_RF_KILL_HW, &priv->status);
835
836
837 iwl_legacy_scan_cancel(priv);
838
839 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
840 test_bit(STATUS_RF_KILL_HW, &priv->status)))
841 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
842 test_bit(STATUS_RF_KILL_HW, &priv->status));
843 else
844 wake_up(&priv->wait_command_queue);
845}
846
847/**
848 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
849 *
850 * Setup the RX handlers for each of the reply types sent from the uCode
851 * to the host.
852 *
853 * This function chains into the hardware specific files for them to setup
854 * any hardware specific handlers as well.
855 */
856static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
857{
858 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
859 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
860 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
861 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
862 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
863 iwl_legacy_rx_spectrum_measure_notif;
864 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
865 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
866 iwl_legacy_rx_pm_debug_statistics_notif;
867 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
868
869 /*
870 * The same handler is used both for the REPLY to a discrete
871 * statistics request from the host and for the periodic
872 * statistics notifications (after received beacons) from the uCode.
873 */
874 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
875 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
876
877 iwl_legacy_setup_rx_scan_handlers(priv);
878 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
879
880 /* Set up hardware specific Rx handlers */
881 iwl3945_hw_rx_handler_setup(priv);
882}
883
884/************************** RX-FUNCTIONS ****************************/
885/*
886 * Rx theory of operation
887 *
888 * The host allocates 32 DMA target addresses and passes the host address
889 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
890 * 0 to 31
891 *
892 * Rx Queue Indexes
893 * The host/firmware share two index registers for managing the Rx buffers.
894 *
895 * The READ index maps to the first position that the firmware may be writing
896 * to -- the driver can read up to (but not including) this position and get
897 * good data.
898 * The READ index is managed by the firmware once the card is enabled.
899 *
900 * The WRITE index maps to the last position the driver has read from -- the
901 * position preceding WRITE is the last slot the firmware can place a packet.
902 *
903 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
904 * WRITE = READ.
905 *
906 * During initialization, the host sets up the READ queue position to the first
907 * INDEX position, and WRITE to the last (READ - 1 wrapped)
908 *
909 * When the firmware places a packet in a buffer, it will advance the READ index
910 * and fire the RX interrupt. The driver can then query the READ index and
911 * process as many packets as possible, moving the WRITE index forward as it
912 * resets the Rx queue buffers with new memory.
913 *
914 * The management in the driver is as follows:
915 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
916 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
917 * to replenish the iwl->rxq->rx_free.
918 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
919 * iwl->rxq is replenished and the READ INDEX is updated (updating the
920 * 'processed' and 'read' driver indexes as well)
921 * + A received packet is processed and handed to the kernel network stack,
922 * detached from the iwl->rxq. The driver 'processed' index is updated.
923 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
924 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
925 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
926 * were enough free buffers and RX_STALLED is set it is cleared.
927 *
928 *
929 * Driver sequence:
930 *
931 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
932 * iwl3945_rx_queue_restock
933 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
934 * queue, updates firmware pointers, and updates
935 * the WRITE index. If insufficient rx_free buffers
936 * are available, schedules iwl3945_rx_replenish
937 *
938 * -- enable interrupts --
939 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
940 * READ INDEX, detaching the SKB from the pool.
941 * Moves the packet buffer from queue to rx_used.
942 * Calls iwl3945_rx_queue_restock to refill any empty
943 * slots.
944 * ...
945 *
946 */
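/*
 * Worked example of the index rules above (illustrative values):
 *
 *   READ = 10, WRITE = 9    ->  WRITE == READ - 1:  queue empty, no good data
 *   READ = 10, WRITE = 10   ->  WRITE == READ:      queue full
 *
 * Right after initialization READ points at the first slot and WRITE at the
 * last one (READ - 1, wrapped), i.e. every buffer is available to the device.
 */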
947
948/**
949 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
950 */
951static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
952 dma_addr_t dma_addr)
953{
954 return cpu_to_le32((u32)dma_addr);
955}
956
957/**
958 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
959 *
960 * If there are slots in the RX queue that need to be restocked,
961 * and we have free pre-allocated buffers, fill the ranks as much
962 * as we can, pulling from rx_free.
963 *
964 * This moves the 'write' index forward to catch up with 'processed', and
965 * also updates the memory address in the firmware to reference the new
966 * target buffer.
967 */
968static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
969{
970 struct iwl_rx_queue *rxq = &priv->rxq;
971 struct list_head *element;
972 struct iwl_rx_mem_buffer *rxb;
973 unsigned long flags;
974 int write;
975
976 spin_lock_irqsave(&rxq->lock, flags);
977 write = rxq->write & ~0x7;
978 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
979 /* Get next free Rx buffer, remove from free list */
980 element = rxq->rx_free.next;
981 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
982 list_del(element);
983
984 /* Point to Rx buffer via next RBD in circular buffer */
985 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
986 rxq->queue[rxq->write] = rxb;
987 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
988 rxq->free_count--;
989 }
990 spin_unlock_irqrestore(&rxq->lock, flags);
991 /* If the pre-allocated buffer pool is dropping low, schedule to
992 * refill it */
993 if (rxq->free_count <= RX_LOW_WATERMARK)
994 queue_work(priv->workqueue, &priv->rx_replenish);
995
996
997 /* If we've added more space for the firmware to place data, tell it.
998 * Increment device's write pointer in multiples of 8. */
999 if ((rxq->write_actual != (rxq->write & ~0x7))
1000 || (abs(rxq->write - rxq->read) > 7)) {
1001 spin_lock_irqsave(&rxq->lock, flags);
1002 rxq->need_update = 1;
1003 spin_unlock_irqrestore(&rxq->lock, flags);
1004 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1005 }
1006}
1007
1008/**
1009 * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
1010 *
1011 * When moving to rx_free an SKB is allocated for the slot.
1012 *
1013 * Also restock the Rx queue via iwl3945_rx_queue_restock.
1014 * This is called as a scheduled work item (except during initialization).
1015 */
1016static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1017{
1018 struct iwl_rx_queue *rxq = &priv->rxq;
1019 struct list_head *element;
1020 struct iwl_rx_mem_buffer *rxb;
1021 struct page *page;
1022 unsigned long flags;
1023 gfp_t gfp_mask = priority;
1024
1025 while (1) {
1026 spin_lock_irqsave(&rxq->lock, flags);
1027
1028 if (list_empty(&rxq->rx_used)) {
1029 spin_unlock_irqrestore(&rxq->lock, flags);
1030 return;
1031 }
1032 spin_unlock_irqrestore(&rxq->lock, flags);
1033
1034 if (rxq->free_count > RX_LOW_WATERMARK)
1035 gfp_mask |= __GFP_NOWARN;
1036
1037 if (priv->hw_params.rx_page_order > 0)
1038 gfp_mask |= __GFP_COMP;
1039
1040 /* Alloc a new receive buffer */
1041 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1042 if (!page) {
1043 if (net_ratelimit())
1044 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1045 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
1046 net_ratelimit())
1047 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
1048 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
1049 rxq->free_count);
1050 /* We don't reschedule replenish work here -- we will
1051 * call the restock method and if it still needs
1052 * more buffers it will schedule replenish */
1053 break;
1054 }
1055
1056 spin_lock_irqsave(&rxq->lock, flags);
1057 if (list_empty(&rxq->rx_used)) {
1058 spin_unlock_irqrestore(&rxq->lock, flags);
1059 __free_pages(page, priv->hw_params.rx_page_order);
1060 return;
1061 }
1062 element = rxq->rx_used.next;
1063 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
1064 list_del(element);
1065 spin_unlock_irqrestore(&rxq->lock, flags);
1066
1067 rxb->page = page;
1068 /* Get physical address of RB/SKB */
1069 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1070 PAGE_SIZE << priv->hw_params.rx_page_order,
1071 PCI_DMA_FROMDEVICE);
1072
1073 spin_lock_irqsave(&rxq->lock, flags);
1074
1075 list_add_tail(&rxb->list, &rxq->rx_free);
1076 rxq->free_count++;
1077 priv->alloc_rxb_page++;
1078
1079 spin_unlock_irqrestore(&rxq->lock, flags);
1080 }
1081}
1082
1083void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1084{
1085 unsigned long flags;
1086 int i;
1087 spin_lock_irqsave(&rxq->lock, flags);
1088 INIT_LIST_HEAD(&rxq->rx_free);
1089 INIT_LIST_HEAD(&rxq->rx_used);
1090 /* Fill the rx_used queue with _all_ of the Rx buffers */
1091 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1092 /* In the reset function, these buffers may have been allocated
1093 * to an SKB, so we need to unmap and free potential storage */
1094 if (rxq->pool[i].page != NULL) {
1095 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1096 PAGE_SIZE << priv->hw_params.rx_page_order,
1097 PCI_DMA_FROMDEVICE);
1098 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1099 rxq->pool[i].page = NULL;
1100 }
1101 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1102 }
1103
1104 /* Set us so that we have processed and used all buffers, but have
1105 * not restocked the Rx queue with fresh buffers */
1106 rxq->read = rxq->write = 0;
1107 rxq->write_actual = 0;
1108 rxq->free_count = 0;
1109 spin_unlock_irqrestore(&rxq->lock, flags);
1110}
1111
1112void iwl3945_rx_replenish(void *data)
1113{
1114 struct iwl_priv *priv = data;
1115 unsigned long flags;
1116
1117 iwl3945_rx_allocate(priv, GFP_KERNEL);
1118
1119 spin_lock_irqsave(&priv->lock, flags);
1120 iwl3945_rx_queue_restock(priv);
1121 spin_unlock_irqrestore(&priv->lock, flags);
1122}
1123
1124static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
1125{
1126 iwl3945_rx_allocate(priv, GFP_ATOMIC);
1127
1128 iwl3945_rx_queue_restock(priv);
1129}
1130
1131
1132/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
1133 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
1134 * This free routine walks the list of POOL entries, and if SKB is set to
1135 * non-NULL it is unmapped and freed.
1136 */
1137static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1138{
1139 int i;
1140 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1141 if (rxq->pool[i].page != NULL) {
1142 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1143 PAGE_SIZE << priv->hw_params.rx_page_order,
1144 PCI_DMA_FROMDEVICE);
1145 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1146 rxq->pool[i].page = NULL;
1147 }
1148 }
1149
1150 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1151 rxq->bd_dma);
1152 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1153 rxq->rb_stts, rxq->rb_stts_dma);
1154 rxq->bd = NULL;
1155 rxq->rb_stts = NULL;
1156}
1157
1158
1159/* Convert linear signal-to-noise ratio into dB */
1160static u8 ratio2dB[100] = {
1161/* 0 1 2 3 4 5 6 7 8 9 */
1162 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
1163 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
1164 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
1165 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
1166 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
1167 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
1168 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
1169 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
1170 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
1171 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
1172};
1173
1174/* Calculates a relative dB value from a ratio of linear
1175 * (i.e. not dB) signal levels.
1176 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
1177int iwl3945_calc_db_from_ratio(int sig_ratio)
1178{
1179 /* 1000:1 or higher just report as 60 dB */
1180 if (sig_ratio >= 1000)
1181 return 60;
1182
1183 /* 100:1 or higher, divide by 10 and use table,
1184 * add 20 dB to make up for divide by 10 */
1185 if (sig_ratio >= 100)
1186 return 20 + (int)ratio2dB[sig_ratio/10];
1187
1188 /* We shouldn't see this */
1189 if (sig_ratio < 1)
1190 return 0;
1191
1192 /* Use table for ratios 1:1 - 99:1 */
1193 return (int)ratio2dB[sig_ratio];
1194}
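/*
 * Worked examples for iwl3945_calc_db_from_ratio() (illustrative inputs,
 * results taken from the table above):
 *
 *   sig_ratio = 2500  ->  >= 1000, clamped to 60 dB
 *   sig_ratio = 250   ->  >= 100:  20 + ratio2dB[25] = 20 + 28 = 48 dB
 *   sig_ratio = 40    ->  direct lookup: ratio2dB[40] = 32 dB
 *   sig_ratio = 0     ->  below 1:1, reported as 0 dB
 */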
1195
1196/**
1197 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
1198 *
1199 * Uses the priv->rx_handlers callback function array to invoke
1200 * the appropriate handlers, including command responses,
1201 * frame-received notifications, and other notifications.
1202 */
1203static void iwl3945_rx_handle(struct iwl_priv *priv)
1204{
1205 struct iwl_rx_mem_buffer *rxb;
1206 struct iwl_rx_packet *pkt;
1207 struct iwl_rx_queue *rxq = &priv->rxq;
1208 u32 r, i;
1209 int reclaim;
1210 unsigned long flags;
1211 u8 fill_rx = 0;
1212 u32 count = 8;
1213 int total_empty = 0;
1214
1215 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1216 * buffer that the driver may process (last buffer filled by ucode). */
1217 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1218 i = rxq->read;
1219
1220 /* calculate how many frames need to be restocked after handling RX */
1221 total_empty = r - rxq->write_actual;
1222 if (total_empty < 0)
1223 total_empty += RX_QUEUE_SIZE;
1224
1225 if (total_empty > (RX_QUEUE_SIZE / 2))
1226 fill_rx = 1;
1227 /* Rx interrupt, but nothing sent from uCode */
1228 if (i == r)
1229 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
1230
1231 while (i != r) {
1232 int len;
1233
1234 rxb = rxq->queue[i];
1235
1236 /* If an RXB doesn't have a Rx queue slot associated with it,
1237 * then a bug has been introduced in the queue refilling
1238 * routines -- catch it here */
1239 BUG_ON(rxb == NULL);
1240
1241 rxq->queue[i] = NULL;
1242
1243 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1244 PAGE_SIZE << priv->hw_params.rx_page_order,
1245 PCI_DMA_FROMDEVICE);
1246 pkt = rxb_addr(rxb);
1247
1248 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1249 len += sizeof(u32); /* account for status word */
1250 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
1251
1252 /* Reclaim a command buffer only if this packet is a response
1253 * to a (driver-originated) command.
1254 * If the packet (e.g. Rx frame) originated from uCode,
1255 * there is no command buffer to reclaim.
1256 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1257 * but apparently a few don't get set; catch them here. */
1258 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1259 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
1260 (pkt->hdr.cmd != REPLY_TX);
1261
1262 /* Based on type of command response or notification,
1263 * handle those that need handling via function in
1264 * rx_handlers table. See iwl3945_setup_rx_handlers() */
1265 if (priv->rx_handlers[pkt->hdr.cmd]) {
1266 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1267 iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1268 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1269 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1270 } else {
1271 /* No handling needed */
1272 IWL_DEBUG_RX(priv,
1273 "r %d i %d No handler needed for %s, 0x%02x\n",
1274 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
1275 pkt->hdr.cmd);
1276 }
1277
1278 /*
1279 * XXX: After here, we should always check rxb->page
1280 * against NULL before touching it or its virtual
1281 * memory (pkt). Because some rx_handler might have
1282 * already taken or freed the pages.
1283 */
1284
1285 if (reclaim) {
1286 /* Invoke any callbacks, transfer the buffer to caller,
1287 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
1288 * as we reclaim the driver command queue */
1289 if (rxb->page)
1290 iwl_legacy_tx_cmd_complete(priv, rxb);
1291 else
1292 IWL_WARN(priv, "Claim null rxb?\n");
1293 }
1294
1295 /* Reuse the page if possible. For notification packets and
1296 * SKBs that fail to Rx correctly, add them back into the
1297 * rx_free list for reuse later. */
1298 spin_lock_irqsave(&rxq->lock, flags);
1299 if (rxb->page != NULL) {
1300 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1301 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1302 PCI_DMA_FROMDEVICE);
1303 list_add_tail(&rxb->list, &rxq->rx_free);
1304 rxq->free_count++;
1305 } else
1306 list_add_tail(&rxb->list, &rxq->rx_used);
1307
1308 spin_unlock_irqrestore(&rxq->lock, flags);
1309
1310 i = (i + 1) & RX_QUEUE_MASK;
1311 /* If there are a lot of unused frames,
1312 * restock the Rx queue so ucode won't assert. */
1313 if (fill_rx) {
1314 count++;
1315 if (count >= 8) {
1316 rxq->read = i;
1317 iwl3945_rx_replenish_now(priv);
1318 count = 0;
1319 }
1320 }
1321 }
1322
1323	/* Update the driver's Rx read index to the last processed entry */
1324 rxq->read = i;
1325 if (fill_rx)
1326 iwl3945_rx_replenish_now(priv);
1327 else
1328 iwl3945_rx_queue_restock(priv);
1329}
1330
1331/* call this function to flush any scheduled tasklet */
1332static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
1333{
1334	/* wait to make sure we flush any pending tasklet */
1335 synchronize_irq(priv->pci_dev->irq);
1336 tasklet_kill(&priv->irq_tasklet);
1337}
1338
1339static const char *iwl3945_desc_lookup(int i)
1340{
1341 switch (i) {
1342 case 1:
1343 return "FAIL";
1344 case 2:
1345 return "BAD_PARAM";
1346 case 3:
1347 return "BAD_CHECKSUM";
1348 case 4:
1349 return "NMI_INTERRUPT";
1350 case 5:
1351 return "SYSASSERT";
1352 case 6:
1353 return "FATAL_ERROR";
1354 }
1355
1356 return "UNKNOWN";
1357}
1358
1359#define ERROR_START_OFFSET (1 * sizeof(u32))
1360#define ERROR_ELEM_SIZE (7 * sizeof(u32))
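/* Each error log entry holds seven words: desc, time, blink1, blink2,
 * ilink1, ilink2 and data1 -- see the reads in the loop below. */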
1361
1362void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1363{
1364 u32 i;
1365 u32 desc, time, count, base, data1;
1366 u32 blink1, blink2, ilink1, ilink2;
1367
1368 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1369
1370 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1371		IWL_ERR(priv, "Invalid error log pointer 0x%08X\n", base);
1372 return;
1373 }
1374
1375
1376 count = iwl_legacy_read_targ_mem(priv, base);
1377
1378 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1379 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1380 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1381 priv->status, count);
1382 }
1383
1384 IWL_ERR(priv, "Desc Time asrtPC blink2 "
1385 "ilink1 nmiPC Line\n");
1386 for (i = ERROR_START_OFFSET;
1387 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1388 i += ERROR_ELEM_SIZE) {
1389 desc = iwl_legacy_read_targ_mem(priv, base + i);
1390 time =
1391 iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
1392 blink1 =
1393 iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
1394 blink2 =
1395 iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
1396 ilink1 =
1397 iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
1398 ilink2 =
1399 iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
1400 data1 =
1401 iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
1402
1403 IWL_ERR(priv,
1404 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1405 iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
1406 ilink1, ilink2, data1);
1407 trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
1408 0, blink1, blink2, ilink1, ilink2);
1409 }
1410}
1411
1412static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1413{
1414 u32 inta, handled = 0;
1415 u32 inta_fh;
1416 unsigned long flags;
1417#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1418 u32 inta_mask;
1419#endif
1420
1421 spin_lock_irqsave(&priv->lock, flags);
1422
1423 /* Ack/clear/reset pending uCode interrupts.
1424 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1425 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
1426 inta = iwl_read32(priv, CSR_INT);
1427 iwl_write32(priv, CSR_INT, inta);
1428
1429 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
1430 * Any new interrupts that happen after this, either while we're
1431 * in this tasklet, or later, will show up in next ISR/tasklet. */
1432 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1433 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
1434
1435#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1436 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
1437 /* just for debug */
1438 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1439 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1440 inta, inta_mask, inta_fh);
1441 }
1442#endif
1443
1444 spin_unlock_irqrestore(&priv->lock, flags);
1445
1446 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1447 * atomic, make sure that inta covers all the interrupts that
1448 * we've discovered, even if FH interrupt came in just after
1449 * reading CSR_INT. */
1450 if (inta_fh & CSR39_FH_INT_RX_MASK)
1451 inta |= CSR_INT_BIT_FH_RX;
1452 if (inta_fh & CSR39_FH_INT_TX_MASK)
1453 inta |= CSR_INT_BIT_FH_TX;
1454
1455 /* Now service all interrupt bits discovered above. */
1456 if (inta & CSR_INT_BIT_HW_ERR) {
1457 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
1458
1459 /* Tell the device to stop sending interrupts */
1460 iwl_legacy_disable_interrupts(priv);
1461
1462 priv->isr_stats.hw++;
1463 iwl_legacy_irq_handle_error(priv);
1464
1465 handled |= CSR_INT_BIT_HW_ERR;
1466
1467 return;
1468 }
1469
1470#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1471 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1472 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1473 if (inta & CSR_INT_BIT_SCD) {
1474			IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
1475				"the frame(s).\n");
1476 priv->isr_stats.sch++;
1477 }
1478
1479 /* Alive notification via Rx interrupt will do the real work */
1480 if (inta & CSR_INT_BIT_ALIVE) {
1481 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
1482 priv->isr_stats.alive++;
1483 }
1484 }
1485#endif
1486 /* Safely ignore these bits for debug checks below */
1487 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1488
1489 /* Error detected by uCode */
1490 if (inta & CSR_INT_BIT_SW_ERR) {
1491 IWL_ERR(priv, "Microcode SW error detected. "
1492 "Restarting 0x%X.\n", inta);
1493 priv->isr_stats.sw++;
1494 iwl_legacy_irq_handle_error(priv);
1495 handled |= CSR_INT_BIT_SW_ERR;
1496 }
1497
1498 /* uCode wakes up after power-down sleep */
1499 if (inta & CSR_INT_BIT_WAKEUP) {
1500 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1501 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1502 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
1503 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
1504 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
1505 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
1506 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
1507 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
1508
1509 priv->isr_stats.wakeup++;
1510 handled |= CSR_INT_BIT_WAKEUP;
1511 }
1512
1513 /* All uCode command responses, including Tx command responses,
1514 * Rx "responses" (frame-received notification), and other
1515	 * notifications from uCode come through here */
1516 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1517 iwl3945_rx_handle(priv);
1518 priv->isr_stats.rx++;
1519 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1520 }
1521
1522 if (inta & CSR_INT_BIT_FH_TX) {
1523 IWL_DEBUG_ISR(priv, "Tx interrupt\n");
1524 priv->isr_stats.tx++;
1525
1526 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
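		/* bit 6 presumably acks the flow-handler TX interrupt for the
		 * service channel (FH39_SRVC_CHNL), matching the credit write below */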
1527 iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
1528 (FH39_SRVC_CHNL), 0x0);
1529 handled |= CSR_INT_BIT_FH_TX;
1530 }
1531
1532 if (inta & ~handled) {
1533 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1534 priv->isr_stats.unhandled++;
1535 }
1536
1537 if (inta & ~priv->inta_mask) {
1538 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1539 inta & ~priv->inta_mask);
1540 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
1541 }
1542
1543 /* Re-enable all interrupts */
1544 /* only Re-enable if disabled by irq */
1545 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1546 iwl_legacy_enable_interrupts(priv);
1547
1548#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1549 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1550 inta = iwl_read32(priv, CSR_INT);
1551 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1552 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1553 IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1554 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1555 }
1556#endif
1557}
1558
1559static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1560 enum ieee80211_band band,
1561 u8 is_active, u8 n_probes,
1562 struct iwl3945_scan_channel *scan_ch,
1563 struct ieee80211_vif *vif)
1564{
1565 struct ieee80211_channel *chan;
1566 const struct ieee80211_supported_band *sband;
1567 const struct iwl_channel_info *ch_info;
1568 u16 passive_dwell = 0;
1569 u16 active_dwell = 0;
1570 int added, i;
1571
1572 sband = iwl_get_hw_mode(priv, band);
1573 if (!sband)
1574 return 0;
1575
1576 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
1577 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1578
1579 if (passive_dwell <= active_dwell)
1580 passive_dwell = active_dwell + 1;
1581
1582 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1583 chan = priv->scan_request->channels[i];
1584
1585 if (chan->band != band)
1586 continue;
1587
1588 scan_ch->channel = chan->hw_value;
1589
1590 ch_info = iwl_legacy_get_channel_info(priv, band,
1591 scan_ch->channel);
1592 if (!iwl_legacy_is_channel_valid(ch_info)) {
1593 IWL_DEBUG_SCAN(priv,
1594 "Channel %d is INVALID for this band.\n",
1595 scan_ch->channel);
1596 continue;
1597 }
1598
1599 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1600 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1601		/* If passive, set up for auto-switch
1602		 * and use the long active_dwell time.
1603 */
1604 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
1605 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1606 scan_ch->type = 0; /* passive */
1607 if (IWL_UCODE_API(priv->ucode_ver) == 1)
1608 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
1609 } else {
1610 scan_ch->type = 1; /* active */
1611 }
1612
1613 /* Set direct probe bits. These may be used both for active
1614		 * scan channels (probes get sent right away),
1615		 * or for passive channels (probes get sent only after
1616		 * hearing a clear Rx packet). */
1617 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
1618 if (n_probes)
1619 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
1620 } else {
1621 /* uCode v1 does not allow setting direct probe bits on
1622 * passive channel. */
1623 if ((scan_ch->type & 1) && n_probes)
1624 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
1625 }
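		/* Bit 0 of scan_ch->type now selects active (1) vs. passive (0);
		 * IWL39_SCAN_PROBE_MASK() adds the direct-probe request bits. */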
1626
1627 /* Set txpower levels to defaults */
1628 scan_ch->tpc.dsp_atten = 110;
1629 /* scan_pwr_info->tpc.dsp_atten; */
1630
1631 /*scan_pwr_info->tpc.tx_gain; */
1632 if (band == IEEE80211_BAND_5GHZ)
1633 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
1634 else {
1635 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
1636 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1637 * power level:
1638 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
1639 */
1640 }
1641
1642 IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
1643 scan_ch->channel,
1644 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
1645 (scan_ch->type & 1) ?
1646 active_dwell : passive_dwell);
1647
1648 scan_ch++;
1649 added++;
1650 }
1651
1652 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1653 return added;
1654}
1655
1656static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1657 struct ieee80211_rate *rates)
1658{
1659 int i;
1660
1661 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1662 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
1663 rates[i].hw_value = i; /* Rate scaling will work on indexes */
1664 rates[i].hw_value_short = i;
1665 rates[i].flags = 0;
1666 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
1667 /*
1668 * If CCK != 1M then set short preamble rate flag.
1669 */
1670 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
1671 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1672 }
1673 }
1674}
1675
1676/******************************************************************************
1677 *
1678 * uCode download functions
1679 *
1680 ******************************************************************************/
1681
1682static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
1683{
1684 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1685 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1686 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1687 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1688 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1689 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1690}
1691
1692/**
1693 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
1694 * looking at all data.
1695 */
1696static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
1697{
1698 u32 val;
1699 u32 save_len = len;
1700 int rc = 0;
1701 u32 errcnt;
1702
1703 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1704
1705 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1706 IWL39_RTC_INST_LOWER_BOUND);
1707
1708 errcnt = 0;
1709 for (; len > 0; len -= sizeof(u32), image++) {
1710 /* read data comes through single port, auto-incr addr */
1711 /* NOTE: Use the debugless read so we don't flood kernel log
1712 * if IWL_DL_IO is set */
1713 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1714 if (val != le32_to_cpu(*image)) {
1715 IWL_ERR(priv, "uCode INST section is invalid at "
1716 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1717 save_len - len, val, le32_to_cpu(*image));
1718 rc = -EIO;
1719 errcnt++;
1720 if (errcnt >= 20)
1721 break;
1722 }
1723 }
1724
1725
1726 if (!errcnt)
1727 IWL_DEBUG_INFO(priv,
1728 "ucode image in INSTRUCTION memory is good\n");
1729
1730 return rc;
1731}
1732
1733
1734/**
1735 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
1736 * using sample data 100 bytes apart. If these sample points are good,
1737 * it's a pretty good bet that everything between them is good, too.
1738 */
1739static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1740{
1741 u32 val;
1742 int rc = 0;
1743 u32 errcnt = 0;
1744 u32 i;
1745
1746 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1747
1748 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1749 /* read data comes through single port, auto-incr addr */
1750 /* NOTE: Use the debugless read so we don't flood kernel log
1751 * if IWL_DL_IO is set */
1752 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1753 i + IWL39_RTC_INST_LOWER_BOUND);
1754 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1755 if (val != le32_to_cpu(*image)) {
1756#if 0 /* Enable this if you want to see details */
1757 IWL_ERR(priv, "uCode INST section is invalid at "
1758 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1759 i, val, *image);
1760#endif
1761 rc = -EIO;
1762 errcnt++;
1763 if (errcnt >= 3)
1764 break;
1765 }
1766 }
1767
1768 return rc;
1769}
1770
1771
1772/**
1773 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
1774 * and verify its contents
1775 */
1776static int iwl3945_verify_ucode(struct iwl_priv *priv)
1777{
1778 __le32 *image;
1779 u32 len;
1780 int rc = 0;
1781
1782 /* Try bootstrap */
1783 image = (__le32 *)priv->ucode_boot.v_addr;
1784 len = priv->ucode_boot.len;
1785 rc = iwl3945_verify_inst_sparse(priv, image, len);
1786 if (rc == 0) {
1787 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
1788 return 0;
1789 }
1790
1791 /* Try initialize */
1792 image = (__le32 *)priv->ucode_init.v_addr;
1793 len = priv->ucode_init.len;
1794 rc = iwl3945_verify_inst_sparse(priv, image, len);
1795 if (rc == 0) {
1796 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
1797 return 0;
1798 }
1799
1800 /* Try runtime/protocol */
1801 image = (__le32 *)priv->ucode_code.v_addr;
1802 len = priv->ucode_code.len;
1803 rc = iwl3945_verify_inst_sparse(priv, image, len);
1804 if (rc == 0) {
1805 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
1806 return 0;
1807 }
1808
1809 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1810
1811 /* Since nothing seems to match, show first several data entries in
1812 * instruction SRAM, so maybe visual inspection will give a clue.
1813 * Selection of bootstrap image (vs. other images) is arbitrary. */
1814 image = (__le32 *)priv->ucode_boot.v_addr;
1815 len = priv->ucode_boot.len;
1816 rc = iwl3945_verify_inst_full(priv, image, len);
1817
1818 return rc;
1819}
1820
1821static void iwl3945_nic_start(struct iwl_priv *priv)
1822{
1823 /* Remove all resets to allow NIC to operate */
1824 iwl_write32(priv, CSR_RESET, 0);
1825}
1826
1827#define IWL3945_UCODE_GET(item) \
1828static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
1829{ \
1830 return le32_to_cpu(ucode->v1.item); \
1831}
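/* e.g. IWL3945_UCODE_GET(inst_size) defines iwl3945_ucode_get_inst_size(),
 * which returns le32_to_cpu(ucode->v1.inst_size) from the v1 header layout. */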
1832
1833static u32 iwl3945_ucode_get_header_size(u32 api_ver)
1834{
1835 return 24;
1836}
1837
1838static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
1839{
1840 return (u8 *) ucode->v1.data;
1841}
1842
1843IWL3945_UCODE_GET(inst_size);
1844IWL3945_UCODE_GET(data_size);
1845IWL3945_UCODE_GET(init_size);
1846IWL3945_UCODE_GET(init_data_size);
1847IWL3945_UCODE_GET(boot_size);
1848
1849/**
1850 * iwl3945_read_ucode - Read uCode images from disk file.
1851 *
1852 * Copy into buffers for card to fetch via bus-mastering
1853 */
1854static int iwl3945_read_ucode(struct iwl_priv *priv)
1855{
1856 const struct iwl_ucode_header *ucode;
1857 int ret = -EINVAL, index;
1858 const struct firmware *ucode_raw;
1859 /* firmware file name contains uCode/driver compatibility version */
1860 const char *name_pre = priv->cfg->fw_name_pre;
1861 const unsigned int api_max = priv->cfg->ucode_api_max;
1862 const unsigned int api_min = priv->cfg->ucode_api_min;
1863 char buf[25];
1864 u8 *src;
1865 size_t len;
1866 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1867
1868 /* Ask kernel firmware_class module to get the boot firmware off disk.
1869 * request_firmware() is synchronous, file is in memory on return. */
1870 for (index = api_max; index >= api_min; index--) {
1871 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
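		/* e.g. with a name_pre such as "iwlwifi-3945-" (taken from
		 * priv->cfg) and index 2, this builds "iwlwifi-3945-2.ucode" */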
1872 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
1873 if (ret < 0) {
1874 IWL_ERR(priv, "%s firmware file req failed: %d\n",
1875 buf, ret);
1876 if (ret == -ENOENT)
1877 continue;
1878 else
1879 goto error;
1880 } else {
1881 if (index < api_max)
1882 IWL_ERR(priv, "Loaded firmware %s, "
1883 "which is deprecated. "
1884				"Please use API v%u instead.\n",
1885 buf, api_max);
1886 IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
1887 "(%zd bytes) from disk\n",
1888 buf, ucode_raw->size);
1889 break;
1890 }
1891 }
1892
1893 if (ret < 0)
1894 goto error;
1895
1896 /* Make sure that we got at least our header! */
1897 if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
1898 IWL_ERR(priv, "File size way too small!\n");
1899 ret = -EINVAL;
1900 goto err_release;
1901 }
1902
1903 /* Data from ucode file: header followed by uCode images */
1904 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1905
1906 priv->ucode_ver = le32_to_cpu(ucode->ver);
1907 api_ver = IWL_UCODE_API(priv->ucode_ver);
1908 inst_size = iwl3945_ucode_get_inst_size(ucode);
1909 data_size = iwl3945_ucode_get_data_size(ucode);
1910 init_size = iwl3945_ucode_get_init_size(ucode);
1911 init_data_size = iwl3945_ucode_get_init_data_size(ucode);
1912 boot_size = iwl3945_ucode_get_boot_size(ucode);
1913 src = iwl3945_ucode_get_data(ucode);
1914
1915 /* api_ver should match the api version forming part of the
1916	 * firmware filename ... but we don't check for that; from here on
1917	 * we rely only on the API version read from the firmware header */
1918
1919 if (api_ver < api_min || api_ver > api_max) {
1920 IWL_ERR(priv, "Driver unable to support your firmware API. "
1921 "Driver supports v%u, firmware is v%u.\n",
1922 api_max, api_ver);
1923 priv->ucode_ver = 0;
1924 ret = -EINVAL;
1925 goto err_release;
1926 }
1927 if (api_ver != api_max)
1928 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
1929 "got %u. New firmware can be obtained "
1930 "from http://www.intellinuxwireless.org.\n",
1931 api_max, api_ver);
1932
1933 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1934 IWL_UCODE_MAJOR(priv->ucode_ver),
1935 IWL_UCODE_MINOR(priv->ucode_ver),
1936 IWL_UCODE_API(priv->ucode_ver),
1937 IWL_UCODE_SERIAL(priv->ucode_ver));
1938
1939 snprintf(priv->hw->wiphy->fw_version,
1940 sizeof(priv->hw->wiphy->fw_version),
1941 "%u.%u.%u.%u",
1942 IWL_UCODE_MAJOR(priv->ucode_ver),
1943 IWL_UCODE_MINOR(priv->ucode_ver),
1944 IWL_UCODE_API(priv->ucode_ver),
1945 IWL_UCODE_SERIAL(priv->ucode_ver));
1946
1947 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1948 priv->ucode_ver);
1949 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
1950 inst_size);
1951 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
1952 data_size);
1953 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
1954 init_size);
1955 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
1956 init_data_size);
1957 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
1958 boot_size);
1959
1960
1961 /* Verify size of file vs. image size info in file's header */
1962 if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
1963 inst_size + data_size + init_size +
1964 init_data_size + boot_size) {
1965
1966 IWL_DEBUG_INFO(priv,
1967 "uCode file size %zd does not match expected size\n",
1968 ucode_raw->size);
1969 ret = -EINVAL;
1970 goto err_release;
1971 }
1972
1973 /* Verify that uCode images will fit in card's SRAM */
1974 if (inst_size > IWL39_MAX_INST_SIZE) {
1975		IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in card's SRAM\n",
1976 inst_size);
1977 ret = -EINVAL;
1978 goto err_release;
1979 }
1980
1981 if (data_size > IWL39_MAX_DATA_SIZE) {
1982		IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in card's SRAM\n",
1983 data_size);
1984 ret = -EINVAL;
1985 goto err_release;
1986 }
1987 if (init_size > IWL39_MAX_INST_SIZE) {
1988 IWL_DEBUG_INFO(priv,
1989				"uCode init instr len %d too large to fit in card's SRAM\n",
1990 init_size);
1991 ret = -EINVAL;
1992 goto err_release;
1993 }
1994 if (init_data_size > IWL39_MAX_DATA_SIZE) {
1995 IWL_DEBUG_INFO(priv,
1996				"uCode init data len %d too large to fit in card's SRAM\n",
1997 init_data_size);
1998 ret = -EINVAL;
1999 goto err_release;
2000 }
2001 if (boot_size > IWL39_MAX_BSM_SIZE) {
2002 IWL_DEBUG_INFO(priv,
2003				"uCode boot instr len %d too large to fit in BSM\n",
2004 boot_size);
2005 ret = -EINVAL;
2006 goto err_release;
2007 }
2008
2009 /* Allocate ucode buffers for card's bus-master loading ... */
2010
2011 /* Runtime instructions and 2 copies of data:
2012 * 1) unmodified from disk
2013 * 2) backup cache for save/restore during power-downs */
2014 priv->ucode_code.len = inst_size;
2015 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2016
2017 priv->ucode_data.len = data_size;
2018 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2019
2020 priv->ucode_data_backup.len = data_size;
2021 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2022
2023 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2024 !priv->ucode_data_backup.v_addr)
2025 goto err_pci_alloc;
2026
2027 /* Initialization instructions and data */
2028 if (init_size && init_data_size) {
2029 priv->ucode_init.len = init_size;
2030 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2031
2032 priv->ucode_init_data.len = init_data_size;
2033 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2034
2035 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2036 goto err_pci_alloc;
2037 }
2038
2039 /* Bootstrap (instructions only, no data) */
2040 if (boot_size) {
2041 priv->ucode_boot.len = boot_size;
2042 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2043
2044 if (!priv->ucode_boot.v_addr)
2045 goto err_pci_alloc;
2046 }
2047
2048 /* Copy images into buffers for card's bus-master reads ... */
2049
2050 /* Runtime instructions (first block of data in file) */
2051 len = inst_size;
2052 IWL_DEBUG_INFO(priv,
2053 "Copying (but not loading) uCode instr len %zd\n", len);
2054 memcpy(priv->ucode_code.v_addr, src, len);
2055 src += len;
2056
2057 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2058 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
2059
2060 /* Runtime data (2nd block)
2061 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
2062 len = data_size;
2063 IWL_DEBUG_INFO(priv,
2064 "Copying (but not loading) uCode data len %zd\n", len);
2065 memcpy(priv->ucode_data.v_addr, src, len);
2066 memcpy(priv->ucode_data_backup.v_addr, src, len);
2067 src += len;
2068
2069 /* Initialization instructions (3rd block) */
2070 if (init_size) {
2071 len = init_size;
2072 IWL_DEBUG_INFO(priv,
2073 "Copying (but not loading) init instr len %zd\n", len);
2074 memcpy(priv->ucode_init.v_addr, src, len);
2075 src += len;
2076 }
2077
2078 /* Initialization data (4th block) */
2079 if (init_data_size) {
2080 len = init_data_size;
2081 IWL_DEBUG_INFO(priv,
2082 "Copying (but not loading) init data len %zd\n", len);
2083 memcpy(priv->ucode_init_data.v_addr, src, len);
2084 src += len;
2085 }
2086
2087 /* Bootstrap instructions (5th block) */
2088 len = boot_size;
2089 IWL_DEBUG_INFO(priv,
2090 "Copying (but not loading) boot instr len %zd\n", len);
2091 memcpy(priv->ucode_boot.v_addr, src, len);
2092
2093	/* We have our copies now, allow the OS to release its copy */
2094 release_firmware(ucode_raw);
2095 return 0;
2096
2097 err_pci_alloc:
2098 IWL_ERR(priv, "failed to allocate pci memory\n");
2099 ret = -ENOMEM;
2100 iwl3945_dealloc_ucode_pci(priv);
2101
2102 err_release:
2103 release_firmware(ucode_raw);
2104
2105 error:
2106 return ret;
2107}
2108
2109
2110/**
2111 * iwl3945_set_ucode_ptrs - Set uCode address location
2112 *
2113 * Tell initialization uCode where to find runtime uCode.
2114 *
2115 * BSM registers initially contain pointers to initialization uCode.
2116 * We need to replace them to load runtime uCode inst and data,
2117 * and to save runtime data when powering down.
2118 */
2119static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2120{
2121 dma_addr_t pinst;
2122 dma_addr_t pdata;
2123
2124 /* bits 31:0 for 3945 */
2125 pinst = priv->ucode_code.p_addr;
2126 pdata = priv->ucode_data_backup.p_addr;
2127
2128 /* Tell bootstrap uCode where to find image to load */
2129 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2130 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2131 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2132 priv->ucode_data.len);
2133
2134 /* Inst byte count must be last to set up, bit 31 signals uCode
2135 * that all new ptr/size info is in place */
2136 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2137 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2138
2139 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
2140
2141 return 0;
2142}
2143
2144/**
2145 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
2146 *
2147 * Called after REPLY_ALIVE notification received from "initialize" uCode.
2148 *
2149 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2150 */
2151static void iwl3945_init_alive_start(struct iwl_priv *priv)
2152{
2153 /* Check alive response for "valid" sign from uCode */
2154 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
2155 /* We had an error bringing up the hardware, so take it
2156 * all the way back down so we can try again */
2157 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
2158 goto restart;
2159 }
2160
2161 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2162 * This is a paranoid check, because we would not have gotten the
2163 * "initialize" alive if code weren't properly loaded. */
2164 if (iwl3945_verify_ucode(priv)) {
2165 /* Runtime instruction load was bad;
2166 * take it all the way back down so we can try again */
2167 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
2168 goto restart;
2169 }
2170
2171 /* Send pointers to protocol/runtime uCode image ... init code will
2172 * load and launch runtime uCode, which will send us another "Alive"
2173 * notification. */
2174 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
2175 if (iwl3945_set_ucode_ptrs(priv)) {
2176 /* Runtime instruction load won't happen;
2177 * take it all the way back down so we can try again */
2178 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
2179 goto restart;
2180 }
2181 return;
2182
2183 restart:
2184 queue_work(priv->workqueue, &priv->restart);
2185}
2186
2187/**
2188 * iwl3945_alive_start - called after REPLY_ALIVE notification received
2189 * from protocol/runtime uCode (initialization uCode's
2190 * Alive gets handled by iwl3945_init_alive_start()).
2191 */
2192static void iwl3945_alive_start(struct iwl_priv *priv)
2193{
2194 int thermal_spin = 0;
2195 u32 rfkill;
2196 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2197
2198 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2199
2200 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2201 /* We had an error bringing up the hardware, so take it
2202 * all the way back down so we can try again */
2203 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2204 goto restart;
2205 }
2206
2207 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2208 * This is a paranoid check, because we would not have gotten the
2209 * "runtime" alive if code weren't properly loaded. */
2210 if (iwl3945_verify_ucode(priv)) {
2211 /* Runtime instruction load was bad;
2212 * take it all the way back down so we can try again */
2213 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2214 goto restart;
2215 }
2216
2217 rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
2218 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2219
2220 if (rfkill & 0x1) {
2221 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2222 /* if RFKILL is not on, then wait for thermal
2223 * sensor in adapter to kick in */
2224 while (iwl3945_hw_get_temperature(priv) == 0) {
2225 thermal_spin++;
2226 udelay(10);
2227 }
2228
2229 if (thermal_spin)
2230 IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
2231 thermal_spin * 10);
2232 } else
2233 set_bit(STATUS_RF_KILL_HW, &priv->status);
2234
2235 /* After the ALIVE response, we can send commands to 3945 uCode */
2236 set_bit(STATUS_ALIVE, &priv->status);
2237
2238 /* Enable watchdog to monitor the driver tx queues */
2239 iwl_legacy_setup_watchdog(priv);
2240
2241 if (iwl_legacy_is_rfkill(priv))
2242 return;
2243
2244 ieee80211_wake_queues(priv->hw);
2245
2246 priv->active_rate = IWL_RATES_MASK_3945;
2247
2248 iwl_legacy_power_update_mode(priv, true);
2249
2250 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2251 struct iwl3945_rxon_cmd *active_rxon =
2252 (struct iwl3945_rxon_cmd *)(&ctx->active);
2253
2254 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2255 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2256 } else {
2257 /* Initialize our rx_config data */
2258 iwl_legacy_connection_init_rx_config(priv, ctx);
2259 }
2260
2261 /* Configure Bluetooth device coexistence support */
2262 iwl_legacy_send_bt_config(priv);
2263
2264 set_bit(STATUS_READY, &priv->status);
2265
2266 /* Configure the adapter for unassociated operation */
2267 iwl3945_commit_rxon(priv, ctx);
2268
2269 iwl3945_reg_txpower_periodic(priv);
2270
2271 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2272 wake_up(&priv->wait_command_queue);
2273
2274 return;
2275
2276 restart:
2277 queue_work(priv->workqueue, &priv->restart);
2278}
2279
2280static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
2281
2282static void __iwl3945_down(struct iwl_priv *priv)
2283{
2284 unsigned long flags;
2285 int exit_pending;
2286
2287 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2288
2289 iwl_legacy_scan_cancel_timeout(priv, 200);
2290
2291 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2292
2293 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
2294 * to prevent rearm timer */
2295 del_timer_sync(&priv->watchdog);
2296
2297 /* Station information will now be cleared in device */
2298 iwl_legacy_clear_ucode_stations(priv, NULL);
2299 iwl_legacy_dealloc_bcast_stations(priv);
2300 iwl_legacy_clear_driver_stations(priv);
2301
2302 /* Unblock any waiting calls */
2303 wake_up_all(&priv->wait_command_queue);
2304
2305 /* Wipe out the EXIT_PENDING status bit if we are not actually
2306 * exiting the module */
2307 if (!exit_pending)
2308 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2309
2310 /* stop and reset the on-board processor */
2311 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2312
2313 /* tell the device to stop sending interrupts */
2314 spin_lock_irqsave(&priv->lock, flags);
2315 iwl_legacy_disable_interrupts(priv);
2316 spin_unlock_irqrestore(&priv->lock, flags);
2317 iwl3945_synchronize_irq(priv);
2318
2319 if (priv->mac80211_registered)
2320 ieee80211_stop_queues(priv->hw);
2321
2322 /* If we have not previously called iwl3945_init() then
2323 * clear all bits but the RF Kill bits and return */
2324 if (!iwl_legacy_is_init(priv)) {
2325 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2326 STATUS_RF_KILL_HW |
2327 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2328 STATUS_GEO_CONFIGURED |
2329 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2330 STATUS_EXIT_PENDING;
2331 goto exit;
2332 }
2333
2334 /* ...otherwise clear out all the status bits but the RF Kill
2335 * bit and continue taking the NIC down. */
2336 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2337 STATUS_RF_KILL_HW |
2338 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2339 STATUS_GEO_CONFIGURED |
2340 test_bit(STATUS_FW_ERROR, &priv->status) <<
2341 STATUS_FW_ERROR |
2342 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2343 STATUS_EXIT_PENDING;
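	/* (test_bit() returns 0 or 1, so the shifts above rebuild a status
	 * mask preserving only the RF_KILL_HW, GEO_CONFIGURED, FW_ERROR and
	 * EXIT_PENDING bits) */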
2344
2345 iwl3945_hw_txq_ctx_stop(priv);
2346 iwl3945_hw_rxq_stop(priv);
2347
2348 /* Power-down device's busmaster DMA clocks */
2349 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2350 udelay(5);
2351
2352 /* Stop the device, and put it in low power state */
2353 iwl_legacy_apm_stop(priv);
2354
2355 exit:
2356 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2357
2358 if (priv->beacon_skb)
2359 dev_kfree_skb(priv->beacon_skb);
2360 priv->beacon_skb = NULL;
2361
2362 /* clear out any free frames */
2363 iwl3945_clear_free_frames(priv);
2364}
2365
2366static void iwl3945_down(struct iwl_priv *priv)
2367{
2368 mutex_lock(&priv->mutex);
2369 __iwl3945_down(priv);
2370 mutex_unlock(&priv->mutex);
2371
2372 iwl3945_cancel_deferred_work(priv);
2373}
2374
2375#define MAX_HW_RESTARTS 5
2376
2377static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2378{
2379 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2380 unsigned long flags;
2381 u8 sta_id;
2382
2383 spin_lock_irqsave(&priv->sta_lock, flags);
2384 sta_id = iwl_legacy_prep_station(priv, ctx,
2385 iwlegacy_bcast_addr, false, NULL);
2386 if (sta_id == IWL_INVALID_STATION) {
2387 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2388 spin_unlock_irqrestore(&priv->sta_lock, flags);
2389
2390 return -EINVAL;
2391 }
2392
2393 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
2394 priv->stations[sta_id].used |= IWL_STA_BCAST;
2395 spin_unlock_irqrestore(&priv->sta_lock, flags);
2396
2397 return 0;
2398}
2399
2400static int __iwl3945_up(struct iwl_priv *priv)
2401{
2402 int rc, i;
2403
2404 rc = iwl3945_alloc_bcast_station(priv);
2405 if (rc)
2406 return rc;
2407
2408 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2409 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2410 return -EIO;
2411 }
2412
2413 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2414 IWL_ERR(priv, "ucode not available for device bring up\n");
2415 return -EIO;
2416 }
2417
2418 /* If platform's RF_KILL switch is NOT set to KILL */
2419 if (iwl_read32(priv, CSR_GP_CNTRL) &
2420 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2421 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2422 else {
2423 set_bit(STATUS_RF_KILL_HW, &priv->status);
2424 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2425 return -ENODEV;
2426 }
2427
2428 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2429
2430 rc = iwl3945_hw_nic_init(priv);
2431 if (rc) {
2432		IWL_ERR(priv, "Unable to init nic\n");
2433 return rc;
2434 }
2435
2436 /* make sure rfkill handshake bits are cleared */
2437 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2438 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2439 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2440
2441 /* clear (again), then enable host interrupts */
2442 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2443 iwl_legacy_enable_interrupts(priv);
2444
2445 /* really make sure rfkill handshake bits are cleared */
2446 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2447 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2448
2449 /* Copy original ucode data image from disk into backup cache.
2450 * This will be used to initialize the on-board processor's
2451 * data SRAM for a clean start when the runtime program first loads. */
2452 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2453 priv->ucode_data.len);
2454
2455 /* We return success when we resume from suspend and rf_kill is on. */
2456 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
2457 return 0;
2458
2459 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2460
2461 /* load bootstrap state machine,
2462 * load bootstrap program into processor's memory,
2463 * prepare to load the "initialize" uCode */
2464 rc = priv->cfg->ops->lib->load_ucode(priv);
2465
2466 if (rc) {
2467 IWL_ERR(priv,
2468 "Unable to set up bootstrap uCode: %d\n", rc);
2469 continue;
2470 }
2471
2472 /* start card; "initialize" will load runtime ucode */
2473 iwl3945_nic_start(priv);
2474
2475 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2476
2477 return 0;
2478 }
2479
2480 set_bit(STATUS_EXIT_PENDING, &priv->status);
2481 __iwl3945_down(priv);
2482 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2483
2484	/* tried to restart and configure the device for as long as our
2485	 * patience could withstand */
2486 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2487 return -EIO;
2488}
2489
2490
2491/*****************************************************************************
2492 *
2493 * Workqueue callbacks
2494 *
2495 *****************************************************************************/
2496
2497static void iwl3945_bg_init_alive_start(struct work_struct *data)
2498{
2499 struct iwl_priv *priv =
2500 container_of(data, struct iwl_priv, init_alive_start.work);
2501
2502 mutex_lock(&priv->mutex);
2503 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2504 goto out;
2505
2506 iwl3945_init_alive_start(priv);
2507out:
2508 mutex_unlock(&priv->mutex);
2509}
2510
2511static void iwl3945_bg_alive_start(struct work_struct *data)
2512{
2513 struct iwl_priv *priv =
2514 container_of(data, struct iwl_priv, alive_start.work);
2515
2516 mutex_lock(&priv->mutex);
2517 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2518 goto out;
2519
2520 iwl3945_alive_start(priv);
2521out:
2522 mutex_unlock(&priv->mutex);
2523}
2524
2525/*
2526 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2527 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2528 * *is* readable even when device has been SW_RESET into low power mode
2529 * (e.g. during RF KILL).
2530 */
2531static void iwl3945_rfkill_poll(struct work_struct *data)
2532{
2533 struct iwl_priv *priv =
2534 container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
2535 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2536 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2537 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2538
2539 if (new_rfkill != old_rfkill) {
2540 if (new_rfkill)
2541 set_bit(STATUS_RF_KILL_HW, &priv->status);
2542 else
2543 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2544
2545 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2546
2547 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2548 new_rfkill ? "disable radio" : "enable radio");
2549 }
2550
2551	/* Keep this running even if the radio is now enabled; it will be
2552	 * cancelled in mac_start() if the system decides to start again */
2553 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2554 round_jiffies_relative(2 * HZ));
2555
2556}
2557
2558int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2559{
2560 struct iwl_host_cmd cmd = {
2561 .id = REPLY_SCAN_CMD,
2562 .len = sizeof(struct iwl3945_scan_cmd),
2563 .flags = CMD_SIZE_HUGE,
2564 };
2565 struct iwl3945_scan_cmd *scan;
2566 u8 n_probes = 0;
2567 enum ieee80211_band band;
2568 bool is_active = false;
2569 int ret;
2570 u16 len;
2571
2572 lockdep_assert_held(&priv->mutex);
2573
2574 if (!priv->scan_cmd) {
2575 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2576 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2577 if (!priv->scan_cmd) {
2578			IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
2579 return -ENOMEM;
2580 }
2581 }
2582 scan = priv->scan_cmd;
2583 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
2584
2585 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
2586 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
2587
2588 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2589 u16 interval;
2590 u32 extra;
2591 u32 suspend_time = 100;
2592 u32 scan_suspend_time = 100;
2593
2594 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
2595
2596 interval = vif->bss_conf.beacon_int;
2597
2598 scan->suspend_time = 0;
2599 scan->max_out_time = cpu_to_le32(200 * 1024);
2600 if (!interval)
2601 interval = suspend_time;
2602 /*
2603 * suspend time format:
2604 * 0-19: beacon interval in usec (time before exec.)
2605 * 20-23: 0
2606 * 24-31: number of beacons (suspend between channels)
2607 */
2608
2609 extra = (suspend_time / interval) << 24;
2610 scan_suspend_time = 0xFF0FFFFF &
2611 (extra | ((suspend_time % interval) * 1024));
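		/* Example: suspend_time = 100 and interval = 100 give
		 * extra = 1 << 24 and a zero remainder, i.e.
		 * scan_suspend_time = 0x01000000: suspend for one beacon
		 * between channels. */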
2612
2613 scan->suspend_time = cpu_to_le32(scan_suspend_time);
2614 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
2615 scan_suspend_time, interval);
2616 }
2617
2618 if (priv->scan_request->n_ssids) {
2619 int i, p = 0;
2620 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
2621 for (i = 0; i < priv->scan_request->n_ssids; i++) {
2622 /* always does wildcard anyway */
2623 if (!priv->scan_request->ssids[i].ssid_len)
2624 continue;
2625 scan->direct_scan[p].id = WLAN_EID_SSID;
2626 scan->direct_scan[p].len =
2627 priv->scan_request->ssids[i].ssid_len;
2628 memcpy(scan->direct_scan[p].ssid,
2629 priv->scan_request->ssids[i].ssid,
2630 priv->scan_request->ssids[i].ssid_len);
2631 n_probes++;
2632 p++;
2633 }
2634 is_active = true;
2635 } else
2636 IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
2637
2638 /* We don't build a direct scan probe request; the uCode will do
2639 * that based on the direct_mask added to each channel entry */
2640 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
2641 scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2642 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2643
2644 /* flags + rate selection */
2645
2646 switch (priv->scan_band) {
2647 case IEEE80211_BAND_2GHZ:
2648 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2649 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
2650 band = IEEE80211_BAND_2GHZ;
2651 break;
2652 case IEEE80211_BAND_5GHZ:
2653 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
2654 band = IEEE80211_BAND_5GHZ;
2655 break;
2656 default:
2657 IWL_WARN(priv, "Invalid scan band\n");
2658 return -EIO;
2659 }
2660
2661 /*
2662	 * If active scanning is requested but a certain channel
2663 * is marked passive, we can do active scanning if we
2664 * detect transmissions.
2665 */
2666 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2667 IWL_GOOD_CRC_TH_DISABLED;
2668
2669 len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
2670 vif->addr, priv->scan_request->ie,
2671 priv->scan_request->ie_len,
2672 IWL_MAX_SCAN_SIZE - sizeof(*scan));
2673 scan->tx_cmd.len = cpu_to_le16(len);
2674
2675 /* select Rx antennas */
2676 scan->flags |= iwl3945_get_antenna_flags(priv);
2677
2678 scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
2679 (void *)&scan->data[len], vif);
2680 if (scan->channel_count == 0) {
2681 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
2682 return -EIO;
2683 }
2684
2685 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
2686 scan->channel_count * sizeof(struct iwl3945_scan_channel);
2687 cmd.data = scan;
2688 scan->len = cpu_to_le16(cmd.len);
2689
2690 set_bit(STATUS_SCAN_HW, &priv->status);
2691 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
2692 if (ret)
2693 clear_bit(STATUS_SCAN_HW, &priv->status);
2694 return ret;
2695}
2696
2697void iwl3945_post_scan(struct iwl_priv *priv)
2698{
2699 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2700
2701 /*
2702 * Since setting the RXON may have been deferred while
2703 * performing the scan, fire one off if needed
2704 */
2705 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2706 iwl3945_commit_rxon(priv, ctx);
2707}
2708
2709static void iwl3945_bg_restart(struct work_struct *data)
2710{
2711 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2712
2713 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2714 return;
2715
2716 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2717 struct iwl_rxon_context *ctx;
2718 mutex_lock(&priv->mutex);
2719 for_each_context(priv, ctx)
2720 ctx->vif = NULL;
2721 priv->is_open = 0;
2722 mutex_unlock(&priv->mutex);
2723 iwl3945_down(priv);
2724 ieee80211_restart_hw(priv->hw);
2725 } else {
2726 iwl3945_down(priv);
2727
2728 mutex_lock(&priv->mutex);
2729 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2730 mutex_unlock(&priv->mutex);
2731 return;
2732 }
2733
2734 __iwl3945_up(priv);
2735 mutex_unlock(&priv->mutex);
2736 }
2737}
2738
2739static void iwl3945_bg_rx_replenish(struct work_struct *data)
2740{
2741 struct iwl_priv *priv =
2742 container_of(data, struct iwl_priv, rx_replenish);
2743
2744 mutex_lock(&priv->mutex);
2745 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2746 goto out;
2747
2748 iwl3945_rx_replenish(priv);
2749out:
2750 mutex_unlock(&priv->mutex);
2751}
2752
2753void iwl3945_post_associate(struct iwl_priv *priv)
2754{
2755 int rc = 0;
2756 struct ieee80211_conf *conf = NULL;
2757 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2758
2759 if (!ctx->vif || !priv->is_open)
2760 return;
2761
2762 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2763 ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
2764
2765 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2766 return;
2767
2768 iwl_legacy_scan_cancel_timeout(priv, 200);
2769
2770 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
2771
2772 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2773 iwl3945_commit_rxon(priv, ctx);
2774
2775 rc = iwl_legacy_send_rxon_timing(priv, ctx);
2776 if (rc)
2777 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
2778 "Attempting to continue.\n");
2779
2780 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2781
2782 ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
2783
2784 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
2785 ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
2786
2787 if (ctx->vif->bss_conf.use_short_preamble)
2788 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2789 else
2790 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2791
2792 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2793 if (ctx->vif->bss_conf.use_short_slot)
2794 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2795 else
2796 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2797 }
2798
2799 iwl3945_commit_rxon(priv, ctx);
2800
2801 switch (ctx->vif->type) {
2802 case NL80211_IFTYPE_STATION:
2803 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
2804 break;
2805 case NL80211_IFTYPE_ADHOC:
2806 iwl3945_send_beacon_cmd(priv);
2807 break;
2808 default:
2809		IWL_ERR(priv, "%s should not be called in %d mode\n",
2810 __func__, ctx->vif->type);
2811 break;
2812 }
2813}
2814
2815/*****************************************************************************
2816 *
2817 * mac80211 entry point functions
2818 *
2819 *****************************************************************************/
2820
2821#define UCODE_READY_TIMEOUT (2 * HZ)
2822
2823static int iwl3945_mac_start(struct ieee80211_hw *hw)
2824{
2825 struct iwl_priv *priv = hw->priv;
2826 int ret;
2827
2828 IWL_DEBUG_MAC80211(priv, "enter\n");
2829
2830 /* we should be verifying the device is ready to be opened */
2831 mutex_lock(&priv->mutex);
2832
2833 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
2834 * ucode filename and max sizes are card-specific. */
2835
2836 if (!priv->ucode_code.len) {
2837 ret = iwl3945_read_ucode(priv);
2838 if (ret) {
2839 IWL_ERR(priv, "Could not read microcode: %d\n", ret);
2840 mutex_unlock(&priv->mutex);
2841 goto out_release_irq;
2842 }
2843 }
2844
2845 ret = __iwl3945_up(priv);
2846
2847 mutex_unlock(&priv->mutex);
2848
2849 if (ret)
2850 goto out_release_irq;
2851
2852 IWL_DEBUG_INFO(priv, "Start UP work.\n");
2853
2854 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
2855 * mac80211 will not be run successfully. */
2856 ret = wait_event_timeout(priv->wait_command_queue,
2857 test_bit(STATUS_READY, &priv->status),
2858 UCODE_READY_TIMEOUT);
2859 if (!ret) {
2860 if (!test_bit(STATUS_READY, &priv->status)) {
2861 IWL_ERR(priv,
2862 "Wait for START_ALIVE timeout after %dms.\n",
2863 jiffies_to_msecs(UCODE_READY_TIMEOUT));
2864 ret = -ETIMEDOUT;
2865 goto out_release_irq;
2866 }
2867 }
2868
2869 /* ucode is running and will send rfkill notifications,
2870 * no need to poll the killswitch state anymore */
2871 cancel_delayed_work(&priv->_3945.rfkill_poll);
2872
2873 priv->is_open = 1;
2874 IWL_DEBUG_MAC80211(priv, "leave\n");
2875 return 0;
2876
2877out_release_irq:
2878 priv->is_open = 0;
2879 IWL_DEBUG_MAC80211(priv, "leave - failed\n");
2880 return ret;
2881}
2882
2883static void iwl3945_mac_stop(struct ieee80211_hw *hw)
2884{
2885 struct iwl_priv *priv = hw->priv;
2886
2887 IWL_DEBUG_MAC80211(priv, "enter\n");
2888
2889 if (!priv->is_open) {
2890 IWL_DEBUG_MAC80211(priv, "leave - skip\n");
2891 return;
2892 }
2893
2894 priv->is_open = 0;
2895
2896 iwl3945_down(priv);
2897
2898 flush_workqueue(priv->workqueue);
2899
2900 /* start polling the killswitch state again */
2901 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2902 round_jiffies_relative(2 * HZ));
2903
2904 IWL_DEBUG_MAC80211(priv, "leave\n");
2905}
2906
2907static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2908{
2909 struct iwl_priv *priv = hw->priv;
2910
2911 IWL_DEBUG_MAC80211(priv, "enter\n");
2912
2913 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2914 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2915
2916 if (iwl3945_tx_skb(priv, skb))
2917 dev_kfree_skb_any(skb);
2918
2919 IWL_DEBUG_MAC80211(priv, "leave\n");
2920}
2921
2922void iwl3945_config_ap(struct iwl_priv *priv)
2923{
2924 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2925 struct ieee80211_vif *vif = ctx->vif;
2926 int rc = 0;
2927
2928 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2929 return;
2930
2931 /* The following should be done only at AP bring up */
2932 if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
2933
2934 /* RXON - unassoc (to set timing command) */
2935 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2936 iwl3945_commit_rxon(priv, ctx);
2937
2938 /* RXON Timing */
2939 rc = iwl_legacy_send_rxon_timing(priv, ctx);
2940 if (rc)
2941 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
2942 "Attempting to continue.\n");
2943
2944 ctx->staging.assoc_id = 0;
2945
2946 if (vif->bss_conf.use_short_preamble)
2947 ctx->staging.flags |=
2948 RXON_FLG_SHORT_PREAMBLE_MSK;
2949 else
2950 ctx->staging.flags &=
2951 ~RXON_FLG_SHORT_PREAMBLE_MSK;
2952
2953 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2954 if (vif->bss_conf.use_short_slot)
2955 ctx->staging.flags |=
2956 RXON_FLG_SHORT_SLOT_MSK;
2957 else
2958 ctx->staging.flags &=
2959 ~RXON_FLG_SHORT_SLOT_MSK;
2960 }
2961 /* restore RXON assoc */
2962 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2963 iwl3945_commit_rxon(priv, ctx);
2964 }
2965 iwl3945_send_beacon_cmd(priv);
2966}
2967
2968static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2969 struct ieee80211_vif *vif,
2970 struct ieee80211_sta *sta,
2971 struct ieee80211_key_conf *key)
2972{
2973 struct iwl_priv *priv = hw->priv;
2974 int ret = 0;
2975 u8 sta_id = IWL_INVALID_STATION;
2976 u8 static_key;
2977
2978 IWL_DEBUG_MAC80211(priv, "enter\n");
2979
2980 if (iwl3945_mod_params.sw_crypto) {
2981 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2982 return -EOPNOTSUPP;
2983 }
2984
2985 /*
2986	 * To support IBSS RSN, don't program group keys in IBSS; the
2987 * hardware will then not attempt to decrypt the frames.
2988 */
2989 if (vif->type == NL80211_IFTYPE_ADHOC &&
2990 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
2991 return -EOPNOTSUPP;
2992
2993 static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
2994
2995 if (!static_key) {
2996 sta_id = iwl_legacy_sta_id_or_broadcast(
2997 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
2998 if (sta_id == IWL_INVALID_STATION)
2999 return -EINVAL;
3000 }
3001
3002 mutex_lock(&priv->mutex);
3003 iwl_legacy_scan_cancel_timeout(priv, 100);
3004
3005 switch (cmd) {
3006 case SET_KEY:
3007 if (static_key)
3008 ret = iwl3945_set_static_key(priv, key);
3009 else
3010 ret = iwl3945_set_dynamic_key(priv, key, sta_id);
3011 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
3012 break;
3013 case DISABLE_KEY:
3014 if (static_key)
3015 ret = iwl3945_remove_static_key(priv);
3016 else
3017 ret = iwl3945_clear_sta_key_info(priv, sta_id);
3018 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
3019 break;
3020 default:
3021 ret = -EINVAL;
3022 }
3023
3024 mutex_unlock(&priv->mutex);
3025 IWL_DEBUG_MAC80211(priv, "leave\n");
3026
3027 return ret;
3028}
3029
3030static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3031 struct ieee80211_vif *vif,
3032 struct ieee80211_sta *sta)
3033{
3034 struct iwl_priv *priv = hw->priv;
3035 struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
3036 int ret;
3037 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3038 u8 sta_id;
3039
3040 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3041 sta->addr);
3042 mutex_lock(&priv->mutex);
3043 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
3044 sta->addr);
3045 sta_priv->common.sta_id = IWL_INVALID_STATION;
3046
3047
3048 ret = iwl_legacy_add_station_common(priv,
3049 &priv->contexts[IWL_RXON_CTX_BSS],
3050 sta->addr, is_ap, sta, &sta_id);
3051 if (ret) {
3052 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3053 sta->addr, ret);
3054 /* Should we return success if return code is EEXIST ? */
3055 mutex_unlock(&priv->mutex);
3056 return ret;
3057 }
3058
3059 sta_priv->common.sta_id = sta_id;
3060
3061 /* Initialize rate scaling */
3062 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3063 sta->addr);
3064 iwl3945_rs_rate_init(priv, sta, sta_id);
3065 mutex_unlock(&priv->mutex);
3066
3067 return 0;
3068}
3069
3070static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3071 unsigned int changed_flags,
3072 unsigned int *total_flags,
3073 u64 multicast)
3074{
3075 struct iwl_priv *priv = hw->priv;
3076 __le32 filter_or = 0, filter_nand = 0;
3077 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3078
3079#define CHK(test, flag) do { \
3080 if (*total_flags & (test)) \
3081 filter_or |= (flag); \
3082 else \
3083 filter_nand |= (flag); \
3084 } while (0)
3085
3086 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
3087 changed_flags, *total_flags);
3088
3089 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3090 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
3091 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3092
3093#undef CHK
3094
3095 mutex_lock(&priv->mutex);
3096
3097 ctx->staging.filter_flags &= ~filter_nand;
3098 ctx->staging.filter_flags |= filter_or;
3099
3100 /*
3101 * Not committing directly because the hardware may be performing a scan;
3102 * even when the hardware is ready, committing here breaks for an unknown
3103 * reason, and the filter flags change will eventually be committed anyway.
3104 */
3105
3106 mutex_unlock(&priv->mutex);
3107
3108 /*
3109 * Receiving all multicast frames is always enabled by the
3110 * default flags setup in iwl_legacy_connection_init_rx_config()
3111 * since we currently do not support programming multicast
3112 * filters into the device.
3113 */
3114 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3115 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3116}
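/*
 * Illustrative sketch (not from the driver): the CHK() macro above splits
 * each mac80211 filter flag into one of two masks -- bits to set
 * (filter_or) and bits to clear (filter_nand) -- and the staging RXON
 * filter_flags are then updated in a single clear-then-set step.  A
 * minimal standalone version of that update with plain integers (the
 * function name and types below are hypothetical, not driver symbols):
 */
#include <stdint.h>

static uint32_t update_filter_flags(uint32_t staging,
				    uint32_t filter_or, uint32_t filter_nand)
{
	staging &= ~filter_nand;	/* clear the flags the caller turned off */
	staging |= filter_or;		/* set the flags the caller turned on */
	return staging;
}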
3117
3118
3119/*****************************************************************************
3120 *
3121 * sysfs attributes
3122 *
3123 *****************************************************************************/
3124
3125#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3126
3127/*
3128 * The following adds a new attribute to the sysfs representation
3129 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3130 * used for controlling the debug level.
3131 *
3132 * See the level definitions in iwl for details.
3133 *
3134 * The debug_level being managed using sysfs below is a per device debug
3135 * level that is used instead of the global debug level if it (the per
3136 * device debug level) is set.
3137 */
3138static ssize_t iwl3945_show_debug_level(struct device *d,
3139 struct device_attribute *attr, char *buf)
3140{
3141 struct iwl_priv *priv = dev_get_drvdata(d);
3142 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
3143}
3144static ssize_t iwl3945_store_debug_level(struct device *d,
3145 struct device_attribute *attr,
3146 const char *buf, size_t count)
3147{
3148 struct iwl_priv *priv = dev_get_drvdata(d);
3149 unsigned long val;
3150 int ret;
3151
3152 ret = strict_strtoul(buf, 0, &val);
3153 if (ret)
3154 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
3155 else {
3156 priv->debug_level = val;
3157 if (iwl_legacy_alloc_traffic_mem(priv))
3158 IWL_ERR(priv,
3159 "Not enough memory to generate traffic log\n");
3160 }
3161 return strnlen(buf, count);
3162}
3163
3164static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
3165 iwl3945_show_debug_level, iwl3945_store_debug_level);
3166
3167#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
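/*
 * Illustrative sketch (not from the driver): the comment above
 * iwl3945_show_debug_level() describes a per-device debug level that is
 * used instead of the module-wide level whenever it is set.  The real
 * helper is iwl_legacy_get_debug_level(), defined elsewhere in the
 * iwlegacy headers; the standalone function below only illustrates that
 * "per-device value wins when non-zero" fallback and is an assumption
 * about its shape, not a copy of the driver's implementation.
 */
#include <stdint.h>

static uint32_t effective_debug_level(uint32_t per_device_level,
				      uint32_t module_wide_level)
{
	/* a non-zero per-device level takes precedence over the global one */
	return per_device_level ? per_device_level : module_wide_level;
}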
3168
3169static ssize_t iwl3945_show_temperature(struct device *d,
3170 struct device_attribute *attr, char *buf)
3171{
3172 struct iwl_priv *priv = dev_get_drvdata(d);
3173
3174 if (!iwl_legacy_is_alive(priv))
3175 return -EAGAIN;
3176
3177 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3178}
3179
3180static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
3181
3182static ssize_t iwl3945_show_tx_power(struct device *d,
3183 struct device_attribute *attr, char *buf)
3184{
3185 struct iwl_priv *priv = dev_get_drvdata(d);
3186 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3187}
3188
3189static ssize_t iwl3945_store_tx_power(struct device *d,
3190 struct device_attribute *attr,
3191 const char *buf, size_t count)
3192{
3193 struct iwl_priv *priv = dev_get_drvdata(d);
3194 char *p = (char *)buf;
3195 u32 val;
3196
3197 val = simple_strtoul(p, &p, 10);
3198 if (p == buf)
3199 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
3200 else
3201 iwl3945_hw_reg_set_txpower(priv, val);
3202
3203 return count;
3204}
3205
3206static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
3207
3208static ssize_t iwl3945_show_flags(struct device *d,
3209 struct device_attribute *attr, char *buf)
3210{
3211 struct iwl_priv *priv = dev_get_drvdata(d);
3212 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3213
3214 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3215}
3216
3217static ssize_t iwl3945_store_flags(struct device *d,
3218 struct device_attribute *attr,
3219 const char *buf, size_t count)
3220{
3221 struct iwl_priv *priv = dev_get_drvdata(d);
3222 u32 flags = simple_strtoul(buf, NULL, 0);
3223 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3224
3225 mutex_lock(&priv->mutex);
3226 if (le32_to_cpu(ctx->staging.flags) != flags) {
3227 /* Cancel any currently running scans... */
3228 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3229 IWL_WARN(priv, "Could not cancel scan.\n");
3230 else {
3231 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
3232 flags);
3233 ctx->staging.flags = cpu_to_le32(flags);
3234 iwl3945_commit_rxon(priv, ctx);
3235 }
3236 }
3237 mutex_unlock(&priv->mutex);
3238
3239 return count;
3240}
3241
3242static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
3243
3244static ssize_t iwl3945_show_filter_flags(struct device *d,
3245 struct device_attribute *attr, char *buf)
3246{
3247 struct iwl_priv *priv = dev_get_drvdata(d);
3248 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3249
3250 return sprintf(buf, "0x%04X\n",
3251 le32_to_cpu(ctx->active.filter_flags));
3252}
3253
3254static ssize_t iwl3945_store_filter_flags(struct device *d,
3255 struct device_attribute *attr,
3256 const char *buf, size_t count)
3257{
3258 struct iwl_priv *priv = dev_get_drvdata(d);
3259 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3260 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3261
3262 mutex_lock(&priv->mutex);
3263 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3264 /* Cancel any currently running scans... */
3265 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3266 IWL_WARN(priv, "Could not cancel scan.\n");
3267 else {
3268 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3269 "0x%04X\n", filter_flags);
3270 ctx->staging.filter_flags =
3271 cpu_to_le32(filter_flags);
3272 iwl3945_commit_rxon(priv, ctx);
3273 }
3274 }
3275 mutex_unlock(&priv->mutex);
3276
3277 return count;
3278}
3279
3280static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
3281 iwl3945_store_filter_flags);
3282
3283static ssize_t iwl3945_show_measurement(struct device *d,
3284 struct device_attribute *attr, char *buf)
3285{
3286 struct iwl_priv *priv = dev_get_drvdata(d);
3287 struct iwl_spectrum_notification measure_report;
3288 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3289 u8 *data = (u8 *)&measure_report;
3290 unsigned long flags;
3291
3292 spin_lock_irqsave(&priv->lock, flags);
3293 if (!(priv->measurement_status & MEASUREMENT_READY)) {
3294 spin_unlock_irqrestore(&priv->lock, flags);
3295 return 0;
3296 }
3297 memcpy(&measure_report, &priv->measure_report, size);
3298 priv->measurement_status = 0;
3299 spin_unlock_irqrestore(&priv->lock, flags);
3300
3301 while (size && (PAGE_SIZE - len)) {
3302 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3303 PAGE_SIZE - len, 1);
3304 len = strlen(buf);
3305 if (PAGE_SIZE - len)
3306 buf[len++] = '\n';
3307
3308 ofs += 16;
3309 size -= min(size, 16U);
3310 }
3311
3312 return len;
3313}
3314
3315static ssize_t iwl3945_store_measurement(struct device *d,
3316 struct device_attribute *attr,
3317 const char *buf, size_t count)
3318{
3319 struct iwl_priv *priv = dev_get_drvdata(d);
3320 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3321 struct ieee80211_measurement_params params = {
3322 .channel = le16_to_cpu(ctx->active.channel),
3323 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3324 .duration = cpu_to_le16(1),
3325 };
3326 u8 type = IWL_MEASURE_BASIC;
3327 u8 buffer[32];
3328 u8 channel;
3329
3330 if (count) {
3331 char *p = buffer;
3332 strlcpy(buffer, buf, min(sizeof(buffer), count + 1));
3333 channel = simple_strtoul(p, NULL, 0);
3334 if (channel)
3335 params.channel = channel;
3336
3337 p = buffer;
3338 while (*p && *p != ' ')
3339 p++;
3340 if (*p)
3341 type = simple_strtoul(p + 1, NULL, 0);
3342 }
3343
3344 IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
3345 "channel %d (for '%s')\n", type, params.channel, buf);
3346 iwl3945_get_measurement(priv, &params, type);
3347
3348 return count;
3349}
3350
3351static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3352 iwl3945_show_measurement, iwl3945_store_measurement);
3353
3354static ssize_t iwl3945_store_retry_rate(struct device *d,
3355 struct device_attribute *attr,
3356 const char *buf, size_t count)
3357{
3358 struct iwl_priv *priv = dev_get_drvdata(d);
3359
3360 priv->retry_rate = simple_strtoul(buf, NULL, 0);
3361 if (priv->retry_rate <= 0)
3362 priv->retry_rate = 1;
3363
3364 return count;
3365}
3366
3367static ssize_t iwl3945_show_retry_rate(struct device *d,
3368 struct device_attribute *attr, char *buf)
3369{
3370 struct iwl_priv *priv = dev_get_drvdata(d);
3371 return sprintf(buf, "%d", priv->retry_rate);
3372}
3373
3374static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
3375 iwl3945_store_retry_rate);
3376
3377
3378static ssize_t iwl3945_show_channels(struct device *d,
3379 struct device_attribute *attr, char *buf)
3380{
3381 /* none of this belongs in sysfs anyway */
3382 return 0;
3383}
3384
3385static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
3386
3387static ssize_t iwl3945_show_antenna(struct device *d,
3388 struct device_attribute *attr, char *buf)
3389{
3390 struct iwl_priv *priv = dev_get_drvdata(d);
3391
3392 if (!iwl_legacy_is_alive(priv))
3393 return -EAGAIN;
3394
3395 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3396}
3397
3398static ssize_t iwl3945_store_antenna(struct device *d,
3399 struct device_attribute *attr,
3400 const char *buf, size_t count)
3401{
3402 struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
3403 int ant;
3404
3405 if (count == 0)
3406 return 0;
3407
3408 if (sscanf(buf, "%1i", &ant) != 1) {
3409 IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
3410 return count;
3411 }
3412
3413 if ((ant >= 0) && (ant <= 2)) {
3414 IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
3415 iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
3416 } else
3417 IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
3418
3419
3420 return count;
3421}
3422
3423static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
3424
3425static ssize_t iwl3945_show_status(struct device *d,
3426 struct device_attribute *attr, char *buf)
3427{
3428 struct iwl_priv *priv = dev_get_drvdata(d);
3429 if (!iwl_legacy_is_alive(priv))
3430 return -EAGAIN;
3431 return sprintf(buf, "0x%08x\n", (int)priv->status);
3432}
3433
3434static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
3435
3436static ssize_t iwl3945_dump_error_log(struct device *d,
3437 struct device_attribute *attr,
3438 const char *buf, size_t count)
3439{
3440 struct iwl_priv *priv = dev_get_drvdata(d);
3441 char *p = (char *)buf;
3442
3443 if (p[0] == '1')
3444 iwl3945_dump_nic_error_log(priv);
3445
3446 return strnlen(buf, count);
3447}
3448
3449static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
3450
3451/*****************************************************************************
3452 *
3453 * driver setup and tear down
3454 *
3455 *****************************************************************************/
3456
3457static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3458{
3459 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3460
3461 init_waitqueue_head(&priv->wait_command_queue);
3462
3463 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3464 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3465 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3466 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3467 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3468
3469 iwl_legacy_setup_scan_deferred_work(priv);
3470
3471 iwl3945_hw_setup_deferred_work(priv);
3472
3473 init_timer(&priv->watchdog);
3474 priv->watchdog.data = (unsigned long)priv;
3475 priv->watchdog.function = iwl_legacy_bg_watchdog;
3476
3477 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3478 iwl3945_irq_tasklet, (unsigned long)priv);
3479}
3480
3481static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3482{
3483 iwl3945_hw_cancel_deferred_work(priv);
3484
3485 cancel_delayed_work_sync(&priv->init_alive_start);
3486 cancel_delayed_work(&priv->alive_start);
3487
3488 iwl_legacy_cancel_scan_deferred_work(priv);
3489}
3490
3491static struct attribute *iwl3945_sysfs_entries[] = {
3492 &dev_attr_antenna.attr,
3493 &dev_attr_channels.attr,
3494 &dev_attr_dump_errors.attr,
3495 &dev_attr_flags.attr,
3496 &dev_attr_filter_flags.attr,
3497 &dev_attr_measurement.attr,
3498 &dev_attr_retry_rate.attr,
3499 &dev_attr_status.attr,
3500 &dev_attr_temperature.attr,
3501 &dev_attr_tx_power.attr,
3502#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3503 &dev_attr_debug_level.attr,
3504#endif
3505 NULL
3506};
3507
3508static struct attribute_group iwl3945_attribute_group = {
3509 .name = NULL, /* put in device directory */
3510 .attrs = iwl3945_sysfs_entries,
3511};
3512
3513struct ieee80211_ops iwl3945_hw_ops = {
3514 .tx = iwl3945_mac_tx,
3515 .start = iwl3945_mac_start,
3516 .stop = iwl3945_mac_stop,
3517 .add_interface = iwl_legacy_mac_add_interface,
3518 .remove_interface = iwl_legacy_mac_remove_interface,
3519 .change_interface = iwl_legacy_mac_change_interface,
3520 .config = iwl_legacy_mac_config,
3521 .configure_filter = iwl3945_configure_filter,
3522 .set_key = iwl3945_mac_set_key,
3523 .conf_tx = iwl_legacy_mac_conf_tx,
3524 .reset_tsf = iwl_legacy_mac_reset_tsf,
3525 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
3526 .hw_scan = iwl_legacy_mac_hw_scan,
3527 .sta_add = iwl3945_mac_sta_add,
3528 .sta_remove = iwl_legacy_mac_sta_remove,
3529 .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
3530};
3531
3532static int iwl3945_init_drv(struct iwl_priv *priv)
3533{
3534 int ret;
3535 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
3536
3537 priv->retry_rate = 1;
3538 priv->beacon_skb = NULL;
3539
3540 spin_lock_init(&priv->sta_lock);
3541 spin_lock_init(&priv->hcmd_lock);
3542
3543 INIT_LIST_HEAD(&priv->free_frames);
3544
3545 mutex_init(&priv->mutex);
3546
3547 priv->ieee_channels = NULL;
3548 priv->ieee_rates = NULL;
3549 priv->band = IEEE80211_BAND_2GHZ;
3550
3551 priv->iw_mode = NL80211_IFTYPE_STATION;
3552 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3553
3554 /* initialize force reset */
3555 priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3556
3557 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3558 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
3559 eeprom->version);
3560 ret = -EINVAL;
3561 goto err;
3562 }
3563 ret = iwl_legacy_init_channel_map(priv);
3564 if (ret) {
3565 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3566 goto err;
3567 }
3568
3569 /* Set up txpower settings in driver for all channels */
3570 if (iwl3945_txpower_set_from_eeprom(priv)) {
3571 ret = -EIO;
3572 goto err_free_channel_map;
3573 }
3574
3575 ret = iwl_legacy_init_geos(priv);
3576 if (ret) {
3577 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3578 goto err_free_channel_map;
3579 }
3580 iwl3945_init_hw_rates(priv, priv->ieee_rates);
3581
3582 return 0;
3583
3584err_free_channel_map:
3585 iwl_legacy_free_channel_map(priv);
3586err:
3587 return ret;
3588}
3589
3590#define IWL3945_MAX_PROBE_REQUEST 200
3591
3592static int iwl3945_setup_mac(struct iwl_priv *priv)
3593{
3594 int ret;
3595 struct ieee80211_hw *hw = priv->hw;
3596
3597 hw->rate_control_algorithm = "iwl-3945-rs";
3598 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
3599 hw->vif_data_size = sizeof(struct iwl_vif_priv);
3600
3601 /* Tell mac80211 our characteristics */
3602 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3603 IEEE80211_HW_SPECTRUM_MGMT;
3604
3605 hw->wiphy->interface_modes =
3606 priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
3607
3608 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3609 WIPHY_FLAG_DISABLE_BEACON_HINTS |
3610 WIPHY_FLAG_IBSS_RSN;
3611
3612 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3613 /* we create the 802.11 header and a zero-length SSID element */
3614 hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
3615
3616 /* Default value; 4 EDCA QOS priorities */
3617 hw->queues = 4;
3618
3619 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
3620 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
3621 &priv->bands[IEEE80211_BAND_2GHZ];
3622
3623 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
3624 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3625 &priv->bands[IEEE80211_BAND_5GHZ];
3626
3627 iwl_legacy_leds_init(priv);
3628
3629 ret = ieee80211_register_hw(priv->hw);
3630 if (ret) {
3631 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
3632 return ret;
3633 }
3634 priv->mac80211_registered = 1;
3635
3636 return 0;
3637}
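/*
 * Illustrative sketch (not from the driver): iwl3945_setup_mac() above
 * budgets the probe request as IWL3945_MAX_PROBE_REQUEST (200 bytes)
 * minus the 24-byte 802.11 management header and the 2-byte header of a
 * zero-length SSID element, leaving 174 bytes of scan IE space reported
 * to mac80211.  The same arithmetic spelled out with plain constants:
 */
#include <assert.h>

static void probe_request_ie_budget(void)
{
	const int max_probe_request = 200;	/* IWL3945_MAX_PROBE_REQUEST */
	const int mgmt_header_len = 24;		/* 802.11 management header */
	const int ssid_element_hdr = 2;		/* element ID byte + length byte */

	assert(max_probe_request - mgmt_header_len - ssid_element_hdr == 174);
}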
3638
3639static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3640{
3641 int err = 0, i;
3642 struct iwl_priv *priv;
3643 struct ieee80211_hw *hw;
3644 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3645 struct iwl3945_eeprom *eeprom;
3646 unsigned long flags;
3647
3648 /***********************
3649 * 1. Allocating HW data
3650 * ********************/
3651
3652 /* mac80211 allocates memory for this device instance, including
3653 * space for this driver's private structure */
3654 hw = iwl_legacy_alloc_all(cfg);
3655 if (hw == NULL) {
3656 pr_err("Can not allocate network device\n");
3657 err = -ENOMEM;
3658 goto out;
3659 }
3660 priv = hw->priv;
3661 SET_IEEE80211_DEV(hw, &pdev->dev);
3662
3663 priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
3664
3665 /* 3945 has only one valid context */
3666 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3667
3668 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3669 priv->contexts[i].ctxid = i;
3670
3671 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3672 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3673 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3674 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3675 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3676 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3677 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3678 BIT(NL80211_IFTYPE_STATION) |
3679 BIT(NL80211_IFTYPE_ADHOC);
3680 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3681 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3682 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3683
3684 /*
3685 * Disabling hardware scan means that mac80211 will perform scans
3686 * "the hard way", rather than using the device's hardware scan.
3687 */
3688 if (iwl3945_mod_params.disable_hw_scan) {
3689 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
3690 iwl3945_hw_ops.hw_scan = NULL;
3691 }
3692
3693 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3694 priv->cfg = cfg;
3695 priv->pci_dev = pdev;
3696 priv->inta_mask = CSR_INI_SET_MASK;
3697
3698 if (iwl_legacy_alloc_traffic_mem(priv))
3699 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3700
3701 /***************************
3702 * 2. Initializing PCI bus
3703 * *************************/
3704 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3705 PCIE_LINK_STATE_CLKPM);
3706
3707 if (pci_enable_device(pdev)) {
3708 err = -ENODEV;
3709 goto out_ieee80211_free_hw;
3710 }
3711
3712 pci_set_master(pdev);
3713
3714 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3715 if (!err)
3716 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3717 if (err) {
3718 IWL_WARN(priv, "No suitable DMA available.\n");
3719 goto out_pci_disable_device;
3720 }
3721
3722 pci_set_drvdata(pdev, priv);
3723 err = pci_request_regions(pdev, DRV_NAME);
3724 if (err)
3725 goto out_pci_disable_device;
3726
3727 /***********************
3728 * 3. Read REV Register
3729 * ********************/
3730 priv->hw_base = pci_iomap(pdev, 0, 0);
3731 if (!priv->hw_base) {
3732 err = -ENODEV;
3733 goto out_pci_release_regions;
3734 }
3735
3736 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
3737 (unsigned long long) pci_resource_len(pdev, 0));
3738 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3739
3740 /* We disable the RETRY_TIMEOUT register (0x41) to keep
3741 * PCI Tx retries from interfering with C3 CPU state */
3742 pci_write_config_byte(pdev, 0x41, 0x00);
3743
3744 /* These spin locks are used in apm_ops.init and in EEPROM access,
3745 * so initialize them now.
3746 */
3747 spin_lock_init(&priv->reg_lock);
3748 spin_lock_init(&priv->lock);
3749
3750 /*
3751 * stop and reset the on-board processor just in case it is in a
3752 * strange state ... like being left stranded by a primary kernel
3753 * and this is now the kdump kernel trying to start up
3754 */
3755 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3756
3757 /***********************
3758 * 4. Read EEPROM
3759 * ********************/
3760
3761 /* Read the EEPROM */
3762 err = iwl_legacy_eeprom_init(priv);
3763 if (err) {
3764 IWL_ERR(priv, "Unable to init EEPROM\n");
3765 goto out_iounmap;
3766 }
3767 /* MAC Address location in EEPROM same for 3945/4965 */
3768 eeprom = (struct iwl3945_eeprom *)priv->eeprom;
3769 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
3770 SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
3771
3772 /***********************
3773 * 5. Setup HW Constants
3774 * ********************/
3775 /* Device-specific setup */
3776 if (iwl3945_hw_set_hw_params(priv)) {
3777 IWL_ERR(priv, "failed to set hw settings\n");
3778 goto out_eeprom_free;
3779 }
3780
3781 /***********************
3782 * 6. Setup priv
3783 * ********************/
3784
3785 err = iwl3945_init_drv(priv);
3786 if (err) {
3787 IWL_ERR(priv, "initializing driver failed\n");
3788 goto out_unset_hw_params;
3789 }
3790
3791 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
3792 priv->cfg->name);
3793
3794 /***********************
3795 * 7. Setup Services
3796 * ********************/
3797
3798 spin_lock_irqsave(&priv->lock, flags);
3799 iwl_legacy_disable_interrupts(priv);
3800 spin_unlock_irqrestore(&priv->lock, flags);
3801
3802 pci_enable_msi(priv->pci_dev);
3803
3804 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3805 IRQF_SHARED, DRV_NAME, priv);
3806 if (err) {
3807 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3808 goto out_disable_msi;
3809 }
3810
3811 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
3812 if (err) {
3813 IWL_ERR(priv, "failed to create sysfs device attributes\n");
3814 goto out_release_irq;
3815 }
3816
3817 iwl_legacy_set_rxon_channel(priv,
3818 &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
3819 &priv->contexts[IWL_RXON_CTX_BSS]);
3820 iwl3945_setup_deferred_work(priv);
3821 iwl3945_setup_rx_handlers(priv);
3822 iwl_legacy_power_initialize(priv);
3823
3824 /*********************************
3825 * 8. Setup and Register mac80211
3826 * *******************************/
3827
3828 iwl_legacy_enable_interrupts(priv);
3829
3830 err = iwl3945_setup_mac(priv);
3831 if (err)
3832 goto out_remove_sysfs;
3833
3834 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
3835 if (err)
3836 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
3837
3838 /* Start monitoring the killswitch */
3839 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
3840 2 * HZ);
3841
3842 return 0;
3843
3844 out_remove_sysfs:
3845 destroy_workqueue(priv->workqueue);
3846 priv->workqueue = NULL;
3847 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
3848 out_release_irq:
3849 free_irq(priv->pci_dev->irq, priv);
3850 out_disable_msi:
3851 pci_disable_msi(priv->pci_dev);
3852 iwl_legacy_free_geos(priv);
3853 iwl_legacy_free_channel_map(priv);
3854 out_unset_hw_params:
3855 iwl3945_unset_hw_params(priv);
3856 out_eeprom_free:
3857 iwl_legacy_eeprom_free(priv);
3858 out_iounmap:
3859 pci_iounmap(pdev, priv->hw_base);
3860 out_pci_release_regions:
3861 pci_release_regions(pdev);
3862 out_pci_disable_device:
3863 pci_set_drvdata(pdev, NULL);
3864 pci_disable_device(pdev);
3865 out_ieee80211_free_hw:
3866 iwl_legacy_free_traffic_mem(priv);
3867 ieee80211_free_hw(priv->hw);
3868 out:
3869 return err;
3870}
3871
3872static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
3873{
3874 struct iwl_priv *priv = pci_get_drvdata(pdev);
3875 unsigned long flags;
3876
3877 if (!priv)
3878 return;
3879
3880 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3881
3882 iwl_legacy_dbgfs_unregister(priv);
3883
3884 set_bit(STATUS_EXIT_PENDING, &priv->status);
3885
3886 iwl_legacy_leds_exit(priv);
3887
3888 if (priv->mac80211_registered) {
3889 ieee80211_unregister_hw(priv->hw);
3890 priv->mac80211_registered = 0;
3891 } else {
3892 iwl3945_down(priv);
3893 }
3894
3895 /*
3896 * Make sure device is reset to low power before unloading driver.
3897 * This may be redundant with iwl_down(), but there are paths to
3898 * run iwl_down() without calling apm_ops.stop(), and there are
3899 * paths to avoid running iwl_down() at all before leaving driver.
3900 * This (inexpensive) call *makes sure* device is reset.
3901 */
3902 iwl_legacy_apm_stop(priv);
3903
3904 /* make sure we flush any pending irq or
3905 * tasklet for the driver
3906 */
3907 spin_lock_irqsave(&priv->lock, flags);
3908 iwl_legacy_disable_interrupts(priv);
3909 spin_unlock_irqrestore(&priv->lock, flags);
3910
3911 iwl3945_synchronize_irq(priv);
3912
3913 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
3914
3915 cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
3916
3917 iwl3945_dealloc_ucode_pci(priv);
3918
3919 if (priv->rxq.bd)
3920 iwl3945_rx_queue_free(priv, &priv->rxq);
3921 iwl3945_hw_txq_ctx_free(priv);
3922
3923 iwl3945_unset_hw_params(priv);
3924
3925 /*netif_stop_queue(dev); */
3926 flush_workqueue(priv->workqueue);
3927
3928 /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
3929 * priv->workqueue... so we can't take down the workqueue
3930 * until now... */
3931 destroy_workqueue(priv->workqueue);
3932 priv->workqueue = NULL;
3933 iwl_legacy_free_traffic_mem(priv);
3934
3935 free_irq(pdev->irq, priv);
3936 pci_disable_msi(pdev);
3937
3938 pci_iounmap(pdev, priv->hw_base);
3939 pci_release_regions(pdev);
3940 pci_disable_device(pdev);
3941 pci_set_drvdata(pdev, NULL);
3942
3943 iwl_legacy_free_channel_map(priv);
3944 iwl_legacy_free_geos(priv);
3945 kfree(priv->scan_cmd);
3946 if (priv->beacon_skb)
3947 dev_kfree_skb(priv->beacon_skb);
3948
3949 ieee80211_free_hw(priv->hw);
3950}
3951
3952
3953/*****************************************************************************
3954 *
3955 * driver and module entry point
3956 *
3957 *****************************************************************************/
3958
3959static struct pci_driver iwl3945_driver = {
3960 .name = DRV_NAME,
3961 .id_table = iwl3945_hw_card_ids,
3962 .probe = iwl3945_pci_probe,
3963 .remove = __devexit_p(iwl3945_pci_remove),
3964 .driver.pm = IWL_LEGACY_PM_OPS,
3965};
3966
3967static int __init iwl3945_init(void)
3968{
3969
3970 int ret;
3971 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3972 pr_info(DRV_COPYRIGHT "\n");
3973
3974 ret = iwl3945_rate_control_register();
3975 if (ret) {
3976 pr_err("Unable to register rate control algorithm: %d\n", ret);
3977 return ret;
3978 }
3979
3980 ret = pci_register_driver(&iwl3945_driver);
3981 if (ret) {
3982 pr_err("Unable to initialize PCI module\n");
3983 goto error_register;
3984 }
3985
3986 return ret;
3987
3988error_register:
3989 iwl3945_rate_control_unregister();
3990 return ret;
3991}
3992
3993static void __exit iwl3945_exit(void)
3994{
3995 pci_unregister_driver(&iwl3945_driver);
3996 iwl3945_rate_control_unregister();
3997}
3998
3999MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
4000
4001module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4002MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4003module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
4004MODULE_PARM_DESC(swcrypto,
4005 "use software crypto (default 1 [software])");
4006module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4007 int, S_IRUGO);
4008MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
4009#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
4010module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
4011MODULE_PARM_DESC(debug, "debug output mask");
4012#endif
4013module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4014MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
4015
4016module_exit(iwl3945_exit);
4017module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
new file mode 100644
index 00000000000..aa0c2539761
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -0,0 +1,3282 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/wireless.h>
44#include <linux/firmware.h>
45#include <linux/etherdevice.h>
46#include <linux/if_arp.h>
47
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl4965"
53
54#include "iwl-eeprom.h"
55#include "iwl-dev.h"
56#include "iwl-core.h"
57#include "iwl-io.h"
58#include "iwl-helpers.h"
59#include "iwl-sta.h"
60#include "iwl-4965-calib.h"
61#include "iwl-4965.h"
62#include "iwl-4965-led.h"
63
64
65/******************************************************************************
66 *
67 * module boilerplate
68 *
69 ******************************************************************************/
70
71/*
72 * module name, copyright, version, etc.
73 */
74#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
75
76#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
77#define VD "d"
78#else
79#define VD
80#endif
81
82#define DRV_VERSION IWLWIFI_VERSION VD
83
84
85MODULE_DESCRIPTION(DRV_DESCRIPTION);
86MODULE_VERSION(DRV_VERSION);
87MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
88MODULE_LICENSE("GPL");
89MODULE_ALIAS("iwl4965");
90
91void iwl4965_update_chain_flags(struct iwl_priv *priv)
92{
93 struct iwl_rxon_context *ctx;
94
95 if (priv->cfg->ops->hcmd->set_rxon_chain) {
96 for_each_context(priv, ctx) {
97 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
98 if (ctx->active.rx_chain != ctx->staging.rx_chain)
99 iwl_legacy_commit_rxon(priv, ctx);
100 }
101 }
102}
103
104static void iwl4965_clear_free_frames(struct iwl_priv *priv)
105{
106 struct list_head *element;
107
108 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
109 priv->frames_count);
110
111 while (!list_empty(&priv->free_frames)) {
112 element = priv->free_frames.next;
113 list_del(element);
114 kfree(list_entry(element, struct iwl_frame, list));
115 priv->frames_count--;
116 }
117
118 if (priv->frames_count) {
119 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
120 priv->frames_count);
121 priv->frames_count = 0;
122 }
123}
124
125static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
126{
127 struct iwl_frame *frame;
128 struct list_head *element;
129 if (list_empty(&priv->free_frames)) {
130 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
131 if (!frame) {
132 IWL_ERR(priv, "Could not allocate frame!\n");
133 return NULL;
134 }
135
136 priv->frames_count++;
137 return frame;
138 }
139
140 element = priv->free_frames.next;
141 list_del(element);
142 return list_entry(element, struct iwl_frame, list);
143}
144
145static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
146{
147 memset(frame, 0, sizeof(*frame));
148 list_add(&frame->list, &priv->free_frames);
149}
150
151static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
152 struct ieee80211_hdr *hdr,
153 int left)
154{
155 lockdep_assert_held(&priv->mutex);
156
157 if (!priv->beacon_skb)
158 return 0;
159
160 if (priv->beacon_skb->len > left)
161 return 0;
162
163 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
164
165 return priv->beacon_skb->len;
166}
167
168/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
169static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
170 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
171 u8 *beacon, u32 frame_size)
172{
173 u16 tim_idx;
174 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
175
176 /*
177 * The index is relative to frame start but we start looking at the
178 * variable-length part of the beacon.
179 */
180 tim_idx = mgmt->u.beacon.variable - beacon;
181
182 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
183 while ((tim_idx < (frame_size - 2)) &&
184 (beacon[tim_idx] != WLAN_EID_TIM))
185 tim_idx += beacon[tim_idx+1] + 2;
186
187 /* If TIM field was found, set variables */
188 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
189 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
190 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
191 } else
192 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
193}
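/*
 * Illustrative sketch (not from the driver): iwl4965_set_beacon_tim()
 * above walks the beacon's variable part as (element ID, length, payload)
 * triples, advancing by length + 2 bytes until it reaches the TIM element.
 * For example, if the variable part starts with an SSID element of length
 * 7 followed by a supported-rates element of length 8, the walk skips 9
 * and then 10 bytes before checking the third element's ID.  A standalone
 * version of the same loop (the buffer and searched ID are illustrative
 * inputs, not driver data):
 */
#include <stddef.h>

static int find_element_offset(const unsigned char *ies, size_t len,
			       unsigned char wanted_id)
{
	size_t idx = 0;

	/* stop while at least the ID and length bytes remain readable */
	while (idx + 1 < len && ies[idx] != wanted_id)
		idx += ies[idx + 1] + 2;

	return (idx + 1 < len && ies[idx] == wanted_id) ? (int)idx : -1;
}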
194
195static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
196 struct iwl_frame *frame)
197{
198 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
199 u32 frame_size;
200 u32 rate_flags;
201 u32 rate;
202 /*
203 * We have to set up the TX command, the TX Beacon command, and the
204 * beacon contents.
205 */
206
207 lockdep_assert_held(&priv->mutex);
208
209 if (!priv->beacon_ctx) {
210 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
211 return 0;
212 }
213
214 /* Initialize memory */
215 tx_beacon_cmd = &frame->u.beacon;
216 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
217
218 /* Set up TX beacon contents */
219 frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
220 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
221 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
222 return 0;
223 if (!frame_size)
224 return 0;
225
226 /* Set up TX command fields */
227 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
228 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
229 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
230 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
231 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
232
233 /* Set up TX beacon command fields */
234 iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
235 frame_size);
236
237 /* Set up packet rate and flags */
238 rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
239 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
240 priv->hw_params.valid_tx_ant);
241 rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
242 if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
243 rate_flags |= RATE_MCS_CCK_MSK;
244 tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
245 rate_flags);
246
247 return sizeof(*tx_beacon_cmd) + frame_size;
248}
249
250int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
251{
252 struct iwl_frame *frame;
253 unsigned int frame_size;
254 int rc;
255
256 frame = iwl4965_get_free_frame(priv);
257 if (!frame) {
258 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
259 "command.\n");
260 return -ENOMEM;
261 }
262
263 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
264 if (!frame_size) {
265 IWL_ERR(priv, "Error configuring the beacon command\n");
266 iwl4965_free_frame(priv, frame);
267 return -EINVAL;
268 }
269
270 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
271 &frame->u.cmd[0]);
272
273 iwl4965_free_frame(priv, frame);
274
275 return rc;
276}
277
278static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
279{
280 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
281
282 dma_addr_t addr = get_unaligned_le32(&tb->lo);
283 if (sizeof(dma_addr_t) > sizeof(u32))
284 addr |=
285 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
286
287 return addr;
288}
289
290static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
291{
292 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
293
294 return le16_to_cpu(tb->hi_n_len) >> 4;
295}
296
297static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
298 dma_addr_t addr, u16 len)
299{
300 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
301 u16 hi_n_len = len << 4;
302
303 put_unaligned_le32(addr, &tb->lo);
304 if (sizeof(dma_addr_t) > sizeof(u32))
305 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
306
307 tb->hi_n_len = cpu_to_le16(hi_n_len);
308
309 tfd->num_tbs = idx + 1;
310}
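/*
 * Illustrative sketch (not from the driver): iwl4965_tfd_set_tb() and the
 * iwl4965_tfd_tb_get_addr()/_get_len() helpers above pack a 36-bit DMA
 * address and a 12-bit length into one buffer descriptor: address bits
 * 31:0 live in tb->lo, address bits 35:32 in bits 3:0 of hi_n_len, and
 * the length in bits 15:4.  A standalone round-trip of that layout (the
 * struct and function names are hypothetical, not the driver's types):
 */
#include <stdint.h>

struct tb_layout {
	uint32_t lo;		/* DMA address bits 31:0 */
	uint16_t hi_n_len;	/* bits 3:0 = address 35:32, bits 15:4 = length */
};

static struct tb_layout pack_tb(uint64_t addr, uint16_t len)
{
	struct tb_layout tb;

	tb.lo = (uint32_t)addr;
	tb.hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
	return tb;
}

static uint64_t unpack_tb_addr(struct tb_layout tb)
{
	return (uint64_t)tb.lo | (((uint64_t)tb.hi_n_len & 0xF) << 32);
}

static uint16_t unpack_tb_len(struct tb_layout tb)
{
	return tb.hi_n_len >> 4;
}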
311
312static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
313{
314 return tfd->num_tbs & 0x1f;
315}
316
317/**
318 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
319 * @priv - driver private data
320 * @txq - tx queue
321 *
322 * Does NOT advance any TFD circular buffer read/write indexes
323 * Does NOT free the TFD itself (which is within circular buffer)
324 */
325void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
326{
327 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
328 struct iwl_tfd *tfd;
329 struct pci_dev *dev = priv->pci_dev;
330 int index = txq->q.read_ptr;
331 int i;
332 int num_tbs;
333
334 tfd = &tfd_tmp[index];
335
336 /* Sanity check on number of chunks */
337 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
338
339 if (num_tbs >= IWL_NUM_OF_TBS) {
340 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
341 /* @todo issue fatal error, it is quite serious situation */
342 return;
343 }
344
345 /* Unmap tx_cmd */
346 if (num_tbs)
347 pci_unmap_single(dev,
348 dma_unmap_addr(&txq->meta[index], mapping),
349 dma_unmap_len(&txq->meta[index], len),
350 PCI_DMA_BIDIRECTIONAL);
351
352 /* Unmap chunks, if any. */
353 for (i = 1; i < num_tbs; i++)
354 pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
355 iwl4965_tfd_tb_get_len(tfd, i),
356 PCI_DMA_TODEVICE);
357
358 /* free SKB */
359 if (txq->txb) {
360 struct sk_buff *skb;
361
362 skb = txq->txb[txq->q.read_ptr].skb;
363
364 /* can be called from irqs-disabled context */
365 if (skb) {
366 dev_kfree_skb_any(skb);
367 txq->txb[txq->q.read_ptr].skb = NULL;
368 }
369 }
370}
371
372int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
373 struct iwl_tx_queue *txq,
374 dma_addr_t addr, u16 len,
375 u8 reset, u8 pad)
376{
377 struct iwl_queue *q;
378 struct iwl_tfd *tfd, *tfd_tmp;
379 u32 num_tbs;
380
381 q = &txq->q;
382 tfd_tmp = (struct iwl_tfd *)txq->tfds;
383 tfd = &tfd_tmp[q->write_ptr];
384
385 if (reset)
386 memset(tfd, 0, sizeof(*tfd));
387
388 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
389
390 /* Each TFD can point to a maximum of 20 Tx buffers */
391 if (num_tbs >= IWL_NUM_OF_TBS) {
392 IWL_ERR(priv, "Error can not send more than %d chunks\n",
393 IWL_NUM_OF_TBS);
394 return -EINVAL;
395 }
396
397 BUG_ON(addr & ~DMA_BIT_MASK(36));
398 if (unlikely(addr & ~IWL_TX_DMA_MASK))
399 IWL_ERR(priv, "Unaligned address = %llx\n",
400 (unsigned long long)addr);
401
402 iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
403
404 return 0;
405}
406
407/*
408 * Tell nic where to find circular buffer of Tx Frame Descriptors for
409 * given Tx queue, and enable the DMA channel used for that queue.
410 *
411 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
412 * channels supported in hardware.
413 */
414int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
415 struct iwl_tx_queue *txq)
416{
417 int txq_id = txq->q.id;
418
419 /* Circular buffer (TFD queue in DRAM) physical base address */
420 iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
421 txq->q.dma_addr >> 8);
422
423 return 0;
424}
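/*
 * Illustrative sketch (not from the driver): iwl4965_hw_tx_queue_init()
 * above programs the TFD circular-buffer base address shifted right by 8,
 * so the FH_MEM_CBBC_QUEUE register effectively holds the address in
 * 256-byte units -- which presumes the TFD array is 256-byte aligned (an
 * inference from the shift, not something stated in this file).  The
 * encoding as a standalone helper (names are hypothetical):
 */
#include <stdint.h>

static uint32_t cbbc_register_value(uint64_t tfd_array_base)
{
	/* exact only when the low 8 bits of the base address are zero */
	return (uint32_t)(tfd_array_base >> 8);
}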
425
426/******************************************************************************
427 *
428 * Generic RX handler implementations
429 *
430 ******************************************************************************/
431static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
432 struct iwl_rx_mem_buffer *rxb)
433{
434 struct iwl_rx_packet *pkt = rxb_addr(rxb);
435 struct iwl_alive_resp *palive;
436 struct delayed_work *pwork;
437
438 palive = &pkt->u.alive_frame;
439
440 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
441 "0x%01X 0x%01X\n",
442 palive->is_valid, palive->ver_type,
443 palive->ver_subtype);
444
445 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
446 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
447 memcpy(&priv->card_alive_init,
448 &pkt->u.alive_frame,
449 sizeof(struct iwl_init_alive_resp));
450 pwork = &priv->init_alive_start;
451 } else {
452 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
453 memcpy(&priv->card_alive, &pkt->u.alive_frame,
454 sizeof(struct iwl_alive_resp));
455 pwork = &priv->alive_start;
456 }
457
458 /* We delay the ALIVE response by 5ms to
459 * give the HW RF Kill time to activate... */
460 if (palive->is_valid == UCODE_VALID_OK)
461 queue_delayed_work(priv->workqueue, pwork,
462 msecs_to_jiffies(5));
463 else
464 IWL_WARN(priv, "uCode did not respond OK.\n");
465}
466
467/**
468 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
469 *
470 * This callback is provided in order to send a statistics request.
471 *
472 * This timer function is continually reset to execute within
473 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
474 * was received. We need to ensure we receive the statistics in order
475 * to update the temperature used for calibrating the TXPOWER.
476 */
477static void iwl4965_bg_statistics_periodic(unsigned long data)
478{
479 struct iwl_priv *priv = (struct iwl_priv *)data;
480
481 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
482 return;
483
484 /* don't send host command if rf-kill is on */
485 if (!iwl_legacy_is_ready_rf(priv))
486 return;
487
488 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
489}
490
491static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
492 struct iwl_rx_mem_buffer *rxb)
493{
494 struct iwl_rx_packet *pkt = rxb_addr(rxb);
495 struct iwl4965_beacon_notif *beacon =
496 (struct iwl4965_beacon_notif *)pkt->u.raw;
497#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
498 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
499
500 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
501 "tsf %d %d rate %d\n",
502 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
503 beacon->beacon_notify_hdr.failure_frame,
504 le32_to_cpu(beacon->ibss_mgr_status),
505 le32_to_cpu(beacon->high_tsf),
506 le32_to_cpu(beacon->low_tsf), rate);
507#endif
508
509 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
510}
511
512static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
513{
514 unsigned long flags;
515
516 IWL_DEBUG_POWER(priv, "Stop all queues\n");
517
518 if (priv->mac80211_registered)
519 ieee80211_stop_queues(priv->hw);
520
521 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
522 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
523 iwl_read32(priv, CSR_UCODE_DRV_GP1);
524
525 spin_lock_irqsave(&priv->reg_lock, flags);
526 if (!iwl_grab_nic_access(priv))
527 iwl_release_nic_access(priv);
528 spin_unlock_irqrestore(&priv->reg_lock, flags);
529}
530
531/* Handle notification from uCode that card's power state is changing
532 * due to software, hardware, or critical temperature RFKILL */
533static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
534 struct iwl_rx_mem_buffer *rxb)
535{
536 struct iwl_rx_packet *pkt = rxb_addr(rxb);
537 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
538 unsigned long status = priv->status;
539
540 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
541 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
542 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
543 (flags & CT_CARD_DISABLED) ?
544 "Reached" : "Not reached");
545
546 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
547 CT_CARD_DISABLED)) {
548
549 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
550 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
551
552 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
553 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
554
555 if (!(flags & RXON_CARD_DISABLED)) {
556 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
557 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
558 iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
559 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
560 }
561 }
562
563 if (flags & CT_CARD_DISABLED)
564 iwl4965_perform_ct_kill_task(priv);
565
566 if (flags & HW_CARD_DISABLED)
567 set_bit(STATUS_RF_KILL_HW, &priv->status);
568 else
569 clear_bit(STATUS_RF_KILL_HW, &priv->status);
570
571 if (!(flags & RXON_CARD_DISABLED))
572 iwl_legacy_scan_cancel(priv);
573
574 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
575 test_bit(STATUS_RF_KILL_HW, &priv->status)))
576 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
577 test_bit(STATUS_RF_KILL_HW, &priv->status));
578 else
579 wake_up(&priv->wait_command_queue);
580}
581
582/**
583 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
584 *
585 * Setup the RX handlers for each of the reply types sent from the uCode
586 * to the host.
587 *
588 * This function chains into the hardware specific files for them to setup
589 * any hardware specific handlers as well.
590 */
591static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
592{
593 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
594 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
595 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
596 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
597 iwl_legacy_rx_spectrum_measure_notif;
598 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
599 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
600 iwl_legacy_rx_pm_debug_statistics_notif;
601 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
602
603 /*
604 * The same handler is used for both the REPLY to a discrete
605 * statistics request from the host as well as for the periodic
606 * statistics notifications (after received beacons) from the uCode.
607 */
608 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
609 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
610
611 iwl_legacy_setup_rx_scan_handlers(priv);
612
613 /* status change handler */
614 priv->rx_handlers[CARD_STATE_NOTIFICATION] =
615 iwl4965_rx_card_state_notif;
616
617 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
618 iwl4965_rx_missed_beacon_notif;
619 /* Rx handlers */
620 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
621 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
622 /* block ack */
623 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
624 /* Set up hardware specific Rx handlers */
625 priv->cfg->ops->lib->rx_handler_setup(priv);
626}
627
628/**
629 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
630 *
631 * Uses the priv->rx_handlers callback function array to invoke
632 * the appropriate handlers, including command responses,
633 * frame-received notifications, and other notifications.
634 */
635void iwl4965_rx_handle(struct iwl_priv *priv)
636{
637 struct iwl_rx_mem_buffer *rxb;
638 struct iwl_rx_packet *pkt;
639 struct iwl_rx_queue *rxq = &priv->rxq;
640 u32 r, i;
641 int reclaim;
642 unsigned long flags;
643 u8 fill_rx = 0;
644 u32 count = 8;
645 int total_empty;
646
647 /* uCode's read index (stored in shared DRAM) indicates the last Rx
648 * buffer that the driver may process (last buffer filled by ucode). */
649 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
650 i = rxq->read;
651
652 /* Rx interrupt, but nothing sent from uCode */
653 if (i == r)
654 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
655
656 /* calculate total frames need to be restock after handling RX */
657 total_empty = r - rxq->write_actual;
658 if (total_empty < 0)
659 total_empty += RX_QUEUE_SIZE;
660
661 if (total_empty > (RX_QUEUE_SIZE / 2))
662 fill_rx = 1;
663
664 while (i != r) {
665 int len;
666
667 rxb = rxq->queue[i];
668
669 /* If an RXB doesn't have a Rx queue slot associated with it,
670 * then a bug has been introduced in the queue refilling
671 * routines -- catch it here */
672 BUG_ON(rxb == NULL);
673
674 rxq->queue[i] = NULL;
675
676 pci_unmap_page(priv->pci_dev, rxb->page_dma,
677 PAGE_SIZE << priv->hw_params.rx_page_order,
678 PCI_DMA_FROMDEVICE);
679 pkt = rxb_addr(rxb);
680
681 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
682 len += sizeof(u32); /* account for status word */
683 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
684
685 /* Reclaim a command buffer only if this packet is a response
686 * to a (driver-originated) command.
687 * If the packet (e.g. Rx frame) originated from uCode,
688 * there is no command buffer to reclaim.
689 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
690 * but apparently a few don't get set; catch them here. */
691 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
692 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
693 (pkt->hdr.cmd != REPLY_RX) &&
694 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
695 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
696 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
697 (pkt->hdr.cmd != REPLY_TX);
698
699 /* Based on type of command response or notification,
700 * handle those that need handling via function in
701 * rx_handlers table. See iwl4965_setup_rx_handlers() */
702 if (priv->rx_handlers[pkt->hdr.cmd]) {
703 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
704 i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
705 pkt->hdr.cmd);
706 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
707 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
708 } else {
709 /* No handling needed */
710 IWL_DEBUG_RX(priv,
711 "r %d i %d No handler needed for %s, 0x%02x\n",
712 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
713 pkt->hdr.cmd);
714 }
715
716 /*
717 * XXX: After here, we should always check rxb->page
718 * against NULL before touching it or its virtual
719 * memory (pkt). Because some rx_handler might have
720 * already taken or freed the pages.
721 */
722
723 if (reclaim) {
724 /* Invoke any callbacks, transfer the buffer to caller,
725 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
726 * as we reclaim the driver command queue */
727 if (rxb->page)
728 iwl_legacy_tx_cmd_complete(priv, rxb);
729 else
730 IWL_WARN(priv, "Claim null rxb?\n");
731 }
732
733 /* Reuse the page if possible. For notification packets and
734 * SKBs that fail to Rx correctly, add them back into the
735 * rx_free list for reuse later. */
736 spin_lock_irqsave(&rxq->lock, flags);
737 if (rxb->page != NULL) {
738 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
739 0, PAGE_SIZE << priv->hw_params.rx_page_order,
740 PCI_DMA_FROMDEVICE);
741 list_add_tail(&rxb->list, &rxq->rx_free);
742 rxq->free_count++;
743 } else
744 list_add_tail(&rxb->list, &rxq->rx_used);
745
746 spin_unlock_irqrestore(&rxq->lock, flags);
747
748 i = (i + 1) & RX_QUEUE_MASK;
749 /* If there are a lot of unused frames,
750 * restock the Rx queue so ucode won't assert. */
751 if (fill_rx) {
752 count++;
753 if (count >= 8) {
754 rxq->read = i;
755 iwl4965_rx_replenish_now(priv);
756 count = 0;
757 }
758 }
759 }
760
761 /* Backtrack one entry */
762 rxq->read = i;
763 if (fill_rx)
764 iwl4965_rx_replenish_now(priv);
765 else
766 iwl4965_rx_queue_restock(priv);
767}
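/*
 * Illustrative sketch (not from the driver): iwl4965_rx_handle() above
 * computes how many RX buffers need restocking as r - rxq->write_actual,
 * adding RX_QUEUE_SIZE when the subtraction goes negative because the
 * indices wrapped, and sets fill_rx once more than half the queue is
 * empty.  Worked example, assuming a queue size of 256 (the actual
 * constant is defined elsewhere in the driver): r = 5 and
 * write_actual = 250 give 5 - 250 = -245, plus 256 = 11 empty slots.
 * The same arithmetic as a standalone helper:
 */
static int rx_buffers_to_restock(int read_index, int write_actual,
				 int queue_size)
{
	int total_empty = read_index - write_actual;

	if (total_empty < 0)
		total_empty += queue_size;	/* the indices wrapped around */
	return total_empty;
}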
768
769/* call this function to flush any scheduled tasklet */
770static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
771{
772 /* wait to make sure we flush any pending tasklet */
773 synchronize_irq(priv->pci_dev->irq);
774 tasklet_kill(&priv->irq_tasklet);
775}
776
777static void iwl4965_irq_tasklet(struct iwl_priv *priv)
778{
779 u32 inta, handled = 0;
780 u32 inta_fh;
781 unsigned long flags;
782 u32 i;
783#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
784 u32 inta_mask;
785#endif
786
787 spin_lock_irqsave(&priv->lock, flags);
788
789 /* Ack/clear/reset pending uCode interrupts.
790 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
791 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
792 inta = iwl_read32(priv, CSR_INT);
793 iwl_write32(priv, CSR_INT, inta);
794
795 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
796 * Any new interrupts that happen after this, either while we're
797 * in this tasklet, or later, will show up in next ISR/tasklet. */
798 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
799 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
800
801#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
802 if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
803 /* just for debug */
804 inta_mask = iwl_read32(priv, CSR_INT_MASK);
805 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
806 inta, inta_mask, inta_fh);
807 }
808#endif
809
810 spin_unlock_irqrestore(&priv->lock, flags);
811
812 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
813 * atomic, make sure that inta covers all the interrupts that
814 * we've discovered, even if FH interrupt came in just after
815 * reading CSR_INT. */
816 if (inta_fh & CSR49_FH_INT_RX_MASK)
817 inta |= CSR_INT_BIT_FH_RX;
818 if (inta_fh & CSR49_FH_INT_TX_MASK)
819 inta |= CSR_INT_BIT_FH_TX;
820
821 /* Now service all interrupt bits discovered above. */
822 if (inta & CSR_INT_BIT_HW_ERR) {
823 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
824
825 /* Tell the device to stop sending interrupts */
826 iwl_legacy_disable_interrupts(priv);
827
828 priv->isr_stats.hw++;
829 iwl_legacy_irq_handle_error(priv);
830
831 handled |= CSR_INT_BIT_HW_ERR;
832
833 return;
834 }
835
836#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
837 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
838 /* NIC fires this, but we don't use it, redundant with WAKEUP */
839 if (inta & CSR_INT_BIT_SCD) {
840			IWL_DEBUG_ISR(priv, "Scheduler finished "
841				"transmitting the frame(s).\n");
842 priv->isr_stats.sch++;
843 }
844
845 /* Alive notification via Rx interrupt will do the real work */
846 if (inta & CSR_INT_BIT_ALIVE) {
847 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
848 priv->isr_stats.alive++;
849 }
850 }
851#endif
852 /* Safely ignore these bits for debug checks below */
853 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
854
855 /* HW RF KILL switch toggled */
856 if (inta & CSR_INT_BIT_RF_KILL) {
857 int hw_rf_kill = 0;
858 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
859 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
860 hw_rf_kill = 1;
861
862 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
863 hw_rf_kill ? "disable radio" : "enable radio");
864
865 priv->isr_stats.rfkill++;
866
867		/* The driver only loads the uCode when the interface is set
868		 * up, but it allows loading even if the radio is killed.
869		 * Hence update the kill-switch state here; the rfkill
870		 * handler will take care of restarting if needed.
871		 */
872 if (!test_bit(STATUS_ALIVE, &priv->status)) {
873 if (hw_rf_kill)
874 set_bit(STATUS_RF_KILL_HW, &priv->status);
875 else
876 clear_bit(STATUS_RF_KILL_HW, &priv->status);
877 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
878 }
879
880 handled |= CSR_INT_BIT_RF_KILL;
881 }
882
883 /* Chip got too hot and stopped itself */
884 if (inta & CSR_INT_BIT_CT_KILL) {
885 IWL_ERR(priv, "Microcode CT kill error detected.\n");
886 priv->isr_stats.ctkill++;
887 handled |= CSR_INT_BIT_CT_KILL;
888 }
889
890 /* Error detected by uCode */
891 if (inta & CSR_INT_BIT_SW_ERR) {
892 IWL_ERR(priv, "Microcode SW error detected. "
893 " Restarting 0x%X.\n", inta);
894 priv->isr_stats.sw++;
895 iwl_legacy_irq_handle_error(priv);
896 handled |= CSR_INT_BIT_SW_ERR;
897 }
898
899 /*
900 * uCode wakes up after power-down sleep.
901 * Tell device about any new tx or host commands enqueued,
902 * and about any Rx buffers made available while asleep.
903 */
904 if (inta & CSR_INT_BIT_WAKEUP) {
905 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
906 iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
907 for (i = 0; i < priv->hw_params.max_txq_num; i++)
908 iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
909 priv->isr_stats.wakeup++;
910 handled |= CSR_INT_BIT_WAKEUP;
911 }
912
913 /* All uCode command responses, including Tx command responses,
914 * Rx "responses" (frame-received notification), and other
915	 * notifications from uCode come through here */
916 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
917 iwl4965_rx_handle(priv);
918 priv->isr_stats.rx++;
919 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
920 }
921
922 /* This "Tx" DMA channel is used only for loading uCode */
923 if (inta & CSR_INT_BIT_FH_TX) {
924 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
925 priv->isr_stats.tx++;
926 handled |= CSR_INT_BIT_FH_TX;
927 /* Wake up uCode load routine, now that load is complete */
928 priv->ucode_write_complete = 1;
929 wake_up(&priv->wait_command_queue);
930 }
931
932 if (inta & ~handled) {
933 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
934 priv->isr_stats.unhandled++;
935 }
936
937 if (inta & ~(priv->inta_mask)) {
938 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
939 inta & ~priv->inta_mask);
940 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
941 }
942
943 /* Re-enable all interrupts */
944 /* only Re-enable if disabled by irq */
945 if (test_bit(STATUS_INT_ENABLED, &priv->status))
946 iwl_legacy_enable_interrupts(priv);
947 /* Re-enable RF_KILL if it occurred */
948 else if (handled & CSR_INT_BIT_RF_KILL)
949 iwl_legacy_enable_rfkill_int(priv);
950
951#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
952 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
953 inta = iwl_read32(priv, CSR_INT);
954 inta_mask = iwl_read32(priv, CSR_INT_MASK);
955 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
956 IWL_DEBUG_ISR(priv,
957 "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
958 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
959 }
960#endif
961}
962
963/*****************************************************************************
964 *
965 * sysfs attributes
966 *
967 *****************************************************************************/
968
969#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
970
971/*
972 * The following adds a new attribute to the sysfs representation
973 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
974 * used for controlling the debug level.
975 *
976 * See the debug level definitions in iwl-debug.h for details.
977 *
978 * The debug_level being managed using sysfs below is a per device debug
979 * level that is used instead of the global debug level if it (the per
980 * device debug level) is set.
981 */
982static ssize_t iwl4965_show_debug_level(struct device *d,
983 struct device_attribute *attr, char *buf)
984{
985 struct iwl_priv *priv = dev_get_drvdata(d);
986 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
987}
988static ssize_t iwl4965_store_debug_level(struct device *d,
989 struct device_attribute *attr,
990 const char *buf, size_t count)
991{
992 struct iwl_priv *priv = dev_get_drvdata(d);
993 unsigned long val;
994 int ret;
995
996 ret = strict_strtoul(buf, 0, &val);
997 if (ret)
998 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
999 else {
1000 priv->debug_level = val;
1001 if (iwl_legacy_alloc_traffic_mem(priv))
1002 IWL_ERR(priv,
1003 "Not enough memory to generate traffic log\n");
1004 }
1005 return strnlen(buf, count);
1006}
1007
1008static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
1009 iwl4965_show_debug_level, iwl4965_store_debug_level);
1010
1011
1012#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1013
1014
1015static ssize_t iwl4965_show_temperature(struct device *d,
1016 struct device_attribute *attr, char *buf)
1017{
1018 struct iwl_priv *priv = dev_get_drvdata(d);
1019
1020 if (!iwl_legacy_is_alive(priv))
1021 return -EAGAIN;
1022
1023 return sprintf(buf, "%d\n", priv->temperature);
1024}
1025
1026static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
1027
1028static ssize_t iwl4965_show_tx_power(struct device *d,
1029 struct device_attribute *attr, char *buf)
1030{
1031 struct iwl_priv *priv = dev_get_drvdata(d);
1032
1033 if (!iwl_legacy_is_ready_rf(priv))
1034 return sprintf(buf, "off\n");
1035 else
1036 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
1037}
1038
1039static ssize_t iwl4965_store_tx_power(struct device *d,
1040 struct device_attribute *attr,
1041 const char *buf, size_t count)
1042{
1043 struct iwl_priv *priv = dev_get_drvdata(d);
1044 unsigned long val;
1045 int ret;
1046
1047 ret = strict_strtoul(buf, 10, &val);
1048 if (ret)
1049 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
1050 else {
1051 ret = iwl_legacy_set_tx_power(priv, val, false);
1052 if (ret)
1053			IWL_ERR(priv, "failed setting tx power (%d).\n",
1054 ret);
1055 else
1056 ret = count;
1057 }
1058 return ret;
1059}
1060
1061static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
1062 iwl4965_show_tx_power, iwl4965_store_tx_power);
1063
1064static struct attribute *iwl_sysfs_entries[] = {
1065 &dev_attr_temperature.attr,
1066 &dev_attr_tx_power.attr,
1067#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1068 &dev_attr_debug_level.attr,
1069#endif
1070 NULL
1071};
1072
1073static struct attribute_group iwl_attribute_group = {
1074 .name = NULL, /* put in device directory */
1075 .attrs = iwl_sysfs_entries,
1076};
1077
1078/******************************************************************************
1079 *
1080 * uCode download functions
1081 *
1082 ******************************************************************************/
1083
1084static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
1085{
1086 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1087 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1088 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1089 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1090 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1091 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1092}
1093
1094static void iwl4965_nic_start(struct iwl_priv *priv)
1095{
1096 /* Remove all resets to allow NIC to operate */
1097 iwl_write32(priv, CSR_RESET, 0);
1098}
1099
1100static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
1101 void *context);
1102static int iwl4965_mac_setup_register(struct iwl_priv *priv,
1103 u32 max_probe_length);
1104
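/*
 * Kick off an asynchronous firmware request for
 * "<fw_name_pre><api_index>.ucode".  The first call tries the highest
 * API version the driver supports (ucode_api_max); each retry from the
 * callback steps the index down until ucode_api_min, after which we
 * give up with -ENOENT.
 */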
1105static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
1106{
1107 const char *name_pre = priv->cfg->fw_name_pre;
1108 char tag[8];
1109
1110 if (first) {
1111 priv->fw_index = priv->cfg->ucode_api_max;
1112 sprintf(tag, "%d", priv->fw_index);
1113 } else {
1114 priv->fw_index--;
1115 sprintf(tag, "%d", priv->fw_index);
1116 }
1117
1118 if (priv->fw_index < priv->cfg->ucode_api_min) {
1119 IWL_ERR(priv, "no suitable firmware found!\n");
1120 return -ENOENT;
1121 }
1122
1123 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1124
1125 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
1126 priv->firmware_name);
1127
1128 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1129 &priv->pci_dev->dev, GFP_KERNEL, priv,
1130 iwl4965_ucode_callback);
1131}
1132
1133struct iwl4965_firmware_pieces {
1134 const void *inst, *data, *init, *init_data, *boot;
1135 size_t inst_size, data_size, init_size, init_data_size, boot_size;
1136};
1137
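/*
 * Parse the legacy (API 0-2) uCode file header: pull the per-image
 * sizes out of the 24-byte header, verify that the file length equals
 * header plus images, and point pieces->inst/data/init/init_data/boot
 * at the images laid out back-to-back in the file.
 */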
1138static int iwl4965_load_firmware(struct iwl_priv *priv,
1139 const struct firmware *ucode_raw,
1140 struct iwl4965_firmware_pieces *pieces)
1141{
1142 struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
1143 u32 api_ver, hdr_size;
1144 const u8 *src;
1145
1146 priv->ucode_ver = le32_to_cpu(ucode->ver);
1147 api_ver = IWL_UCODE_API(priv->ucode_ver);
1148
1149 switch (api_ver) {
1150 default:
1151 case 0:
1152 case 1:
1153 case 2:
1154 hdr_size = 24;
1155 if (ucode_raw->size < hdr_size) {
1156 IWL_ERR(priv, "File size too small!\n");
1157 return -EINVAL;
1158 }
1159 pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
1160 pieces->data_size = le32_to_cpu(ucode->v1.data_size);
1161 pieces->init_size = le32_to_cpu(ucode->v1.init_size);
1162 pieces->init_data_size =
1163 le32_to_cpu(ucode->v1.init_data_size);
1164 pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
1165 src = ucode->v1.data;
1166 break;
1167 }
1168
1169 /* Verify size of file vs. image size info in file's header */
1170 if (ucode_raw->size != hdr_size + pieces->inst_size +
1171 pieces->data_size + pieces->init_size +
1172 pieces->init_data_size + pieces->boot_size) {
1173
1174 IWL_ERR(priv,
1175 "uCode file size %d does not match expected size\n",
1176 (int)ucode_raw->size);
1177 return -EINVAL;
1178 }
1179
1180 pieces->inst = src;
1181 src += pieces->inst_size;
1182 pieces->data = src;
1183 src += pieces->data_size;
1184 pieces->init = src;
1185 src += pieces->init_size;
1186 pieces->init_data = src;
1187 src += pieces->init_data_size;
1188 pieces->boot = src;
1189 src += pieces->boot_size;
1190
1191 return 0;
1192}
1193
1194/**
1195 * iwl4965_ucode_callback - callback when firmware was loaded
1196 *
1197 * If loaded successfully, copies the firmware into buffers
1198 * for the card to fetch (via DMA).
1199 */
1200static void
1201iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
1202{
1203 struct iwl_priv *priv = context;
1204 struct iwl_ucode_header *ucode;
1205 int err;
1206 struct iwl4965_firmware_pieces pieces;
1207 const unsigned int api_max = priv->cfg->ucode_api_max;
1208 const unsigned int api_min = priv->cfg->ucode_api_min;
1209 u32 api_ver;
1210
1211 u32 max_probe_length = 200;
1212 u32 standard_phy_calibration_size =
1213 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1214
1215 memset(&pieces, 0, sizeof(pieces));
1216
1217 if (!ucode_raw) {
1218 if (priv->fw_index <= priv->cfg->ucode_api_max)
1219 IWL_ERR(priv,
1220 "request for firmware file '%s' failed.\n",
1221 priv->firmware_name);
1222 goto try_again;
1223 }
1224
1225 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1226 priv->firmware_name, ucode_raw->size);
1227
1228 /* Make sure that we got at least the API version number */
1229 if (ucode_raw->size < 4) {
1230 IWL_ERR(priv, "File size way too small!\n");
1231 goto try_again;
1232 }
1233
1234 /* Data from ucode file: header followed by uCode images */
1235 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1236
1237 err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
1238
1239 if (err)
1240 goto try_again;
1241
1242 api_ver = IWL_UCODE_API(priv->ucode_ver);
1243
1244 /*
1245 * api_ver should match the api version forming part of the
1246	 * firmware filename ... but we don't check for that; from here on
1247	 * we rely only on the API version read from the firmware header
1248 */
1249 if (api_ver < api_min || api_ver > api_max) {
1250 IWL_ERR(priv,
1251 "Driver unable to support your firmware API. "
1252 "Driver supports v%u, firmware is v%u.\n",
1253 api_max, api_ver);
1254 goto try_again;
1255 }
1256
1257 if (api_ver != api_max)
1258 IWL_ERR(priv,
1259 "Firmware has old API version. Expected v%u, "
1260 "got v%u. New firmware can be obtained "
1261 "from http://www.intellinuxwireless.org.\n",
1262 api_max, api_ver);
1263
1264 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1265 IWL_UCODE_MAJOR(priv->ucode_ver),
1266 IWL_UCODE_MINOR(priv->ucode_ver),
1267 IWL_UCODE_API(priv->ucode_ver),
1268 IWL_UCODE_SERIAL(priv->ucode_ver));
1269
1270 snprintf(priv->hw->wiphy->fw_version,
1271 sizeof(priv->hw->wiphy->fw_version),
1272 "%u.%u.%u.%u",
1273 IWL_UCODE_MAJOR(priv->ucode_ver),
1274 IWL_UCODE_MINOR(priv->ucode_ver),
1275 IWL_UCODE_API(priv->ucode_ver),
1276 IWL_UCODE_SERIAL(priv->ucode_ver));
1277
1278 /*
1279 * For any of the failures below (before allocating pci memory)
1280 * we will try to load a version with a smaller API -- maybe the
1281 * user just got a corrupted version of the latest API.
1282 */
1283
1284 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1285 priv->ucode_ver);
1286 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1287 pieces.inst_size);
1288 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1289 pieces.data_size);
1290 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1291 pieces.init_size);
1292 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1293 pieces.init_data_size);
1294 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
1295 pieces.boot_size);
1296
1297 /* Verify that uCode images will fit in card's SRAM */
1298 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1299 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1300 pieces.inst_size);
1301 goto try_again;
1302 }
1303
1304 if (pieces.data_size > priv->hw_params.max_data_size) {
1305 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1306 pieces.data_size);
1307 goto try_again;
1308 }
1309
1310 if (pieces.init_size > priv->hw_params.max_inst_size) {
1311 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1312 pieces.init_size);
1313 goto try_again;
1314 }
1315
1316 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1317 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1318 pieces.init_data_size);
1319 goto try_again;
1320 }
1321
1322 if (pieces.boot_size > priv->hw_params.max_bsm_size) {
1323 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
1324 pieces.boot_size);
1325 goto try_again;
1326 }
1327
1328 /* Allocate ucode buffers for card's bus-master loading ... */
1329
1330 /* Runtime instructions and 2 copies of data:
1331 * 1) unmodified from disk
1332 * 2) backup cache for save/restore during power-downs */
1333 priv->ucode_code.len = pieces.inst_size;
1334 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
1335
1336 priv->ucode_data.len = pieces.data_size;
1337 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
1338
1339 priv->ucode_data_backup.len = pieces.data_size;
1340 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1341
1342 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
1343 !priv->ucode_data_backup.v_addr)
1344 goto err_pci_alloc;
1345
1346 /* Initialization instructions and data */
1347 if (pieces.init_size && pieces.init_data_size) {
1348 priv->ucode_init.len = pieces.init_size;
1349 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
1350
1351 priv->ucode_init_data.len = pieces.init_data_size;
1352 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1353
1354 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
1355 goto err_pci_alloc;
1356 }
1357
1358 /* Bootstrap (instructions only, no data) */
1359 if (pieces.boot_size) {
1360 priv->ucode_boot.len = pieces.boot_size;
1361 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
1362
1363 if (!priv->ucode_boot.v_addr)
1364 goto err_pci_alloc;
1365 }
1366
1367 /* Now that we can no longer fail, copy information */
1368
1369 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1370
1371 /* Copy images into buffers for card's bus-master reads ... */
1372
1373 /* Runtime instructions (first block of data in file) */
1374 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
1375 pieces.inst_size);
1376 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
1377
1378 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1379 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1380
1381 /*
1382 * Runtime data
1383 * NOTE: Copy into backup buffer will be done in iwl_up()
1384 */
1385 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
1386 pieces.data_size);
1387 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
1388 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
1389
1390 /* Initialization instructions */
1391 if (pieces.init_size) {
1392 IWL_DEBUG_INFO(priv,
1393 "Copying (but not loading) init instr len %Zd\n",
1394 pieces.init_size);
1395 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
1396 }
1397
1398 /* Initialization data */
1399 if (pieces.init_data_size) {
1400 IWL_DEBUG_INFO(priv,
1401 "Copying (but not loading) init data len %Zd\n",
1402 pieces.init_data_size);
1403 memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
1404 pieces.init_data_size);
1405 }
1406
1407 /* Bootstrap instructions */
1408 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
1409 pieces.boot_size);
1410 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
1411
1412 /*
1413 * figure out the offset of chain noise reset and gain commands
1414	 * based on the size of the standard phy calibration commands table
1415 */
1416 priv->_4965.phy_calib_chain_noise_reset_cmd =
1417 standard_phy_calibration_size;
1418 priv->_4965.phy_calib_chain_noise_gain_cmd =
1419 standard_phy_calibration_size + 1;
1420
1421 /**************************************************
1422 * This is still part of probe() in a sense...
1423 *
1424 * 9. Setup and register with mac80211 and debugfs
1425 **************************************************/
1426 err = iwl4965_mac_setup_register(priv, max_probe_length);
1427 if (err)
1428 goto out_unbind;
1429
1430 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
1431 if (err)
1432 IWL_ERR(priv,
1433 "failed to create debugfs files. Ignoring error: %d\n", err);
1434
1435 err = sysfs_create_group(&priv->pci_dev->dev.kobj,
1436 &iwl_attribute_group);
1437 if (err) {
1438 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1439 goto out_unbind;
1440 }
1441
1442 /* We have our copies now, allow OS release its copies */
1443 release_firmware(ucode_raw);
1444 complete(&priv->_4965.firmware_loading_complete);
1445 return;
1446
1447 try_again:
1448 /* try next, if any */
1449 if (iwl4965_request_firmware(priv, false))
1450 goto out_unbind;
1451 release_firmware(ucode_raw);
1452 return;
1453
1454 err_pci_alloc:
1455 IWL_ERR(priv, "failed to allocate pci memory\n");
1456 iwl4965_dealloc_ucode_pci(priv);
1457 out_unbind:
1458 complete(&priv->_4965.firmware_loading_complete);
1459 device_release_driver(&priv->pci_dev->dev);
1460 release_firmware(ucode_raw);
1461}
1462
1463static const char * const desc_lookup_text[] = {
1464 "OK",
1465 "FAIL",
1466 "BAD_PARAM",
1467 "BAD_CHECKSUM",
1468 "NMI_INTERRUPT_WDG",
1469 "SYSASSERT",
1470 "FATAL_ERROR",
1471 "BAD_COMMAND",
1472 "HW_ERROR_TUNE_LOCK",
1473 "HW_ERROR_TEMPERATURE",
1474 "ILLEGAL_CHAN_FREQ",
1475 "VCC_NOT_STABLE",
1476 "FH_ERROR",
1477 "NMI_INTERRUPT_HOST",
1478 "NMI_INTERRUPT_ACTION_PT",
1479 "NMI_INTERRUPT_UNKNOWN",
1480 "UCODE_VERSION_MISMATCH",
1481 "HW_ERROR_ABS_LOCK",
1482 "HW_ERROR_CAL_LOCK_FAIL",
1483 "NMI_INTERRUPT_INST_ACTION_PT",
1484 "NMI_INTERRUPT_DATA_ACTION_PT",
1485 "NMI_TRM_HW_ER",
1486 "NMI_INTERRUPT_TRM",
1487 "NMI_INTERRUPT_BREAK_POINT",
1488 "DEBUG_0",
1489 "DEBUG_1",
1490 "DEBUG_2",
1491 "DEBUG_3",
1492};
1493
1494static struct { char *name; u8 num; } advanced_lookup[] = {
1495 { "NMI_INTERRUPT_WDG", 0x34 },
1496 { "SYSASSERT", 0x35 },
1497 { "UCODE_VERSION_MISMATCH", 0x37 },
1498 { "BAD_COMMAND", 0x38 },
1499 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1500 { "FATAL_ERROR", 0x3D },
1501 { "NMI_TRM_HW_ERR", 0x46 },
1502 { "NMI_INTERRUPT_TRM", 0x4C },
1503 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1504 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1505 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1506 { "NMI_INTERRUPT_HOST", 0x66 },
1507 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1508 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1509 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1510 { "ADVANCED_SYSASSERT", 0 },
1511};
1512
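/*
 * Map a uCode error code to a printable name: small codes index
 * desc_lookup_text[] directly, larger ones are matched against
 * advanced_lookup[], with its final "ADVANCED_SYSASSERT" entry used as
 * the fallback when nothing matches.
 */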
1513static const char *iwl4965_desc_lookup(u32 num)
1514{
1515 int i;
1516 int max = ARRAY_SIZE(desc_lookup_text);
1517
1518 if (num < max)
1519 return desc_lookup_text[num];
1520
1521 max = ARRAY_SIZE(advanced_lookup) - 1;
1522 for (i = 0; i < max; i++) {
1523 if (advanced_lookup[i].num == num)
1524 break;
1525 }
1526 return advanced_lookup[i].name;
1527}
1528
1529#define ERROR_START_OFFSET (1 * sizeof(u32))
1530#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1531
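/*
 * Dump the uCode error-event table.  Its SRAM address comes from the
 * (init or runtime) ALIVE response; the first word is the entry count
 * and the following words hold the error descriptor, program counter,
 * branch/interrupt link registers, data words, source line, timestamp
 * and last host command, which are read out and printed below.
 */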
1532void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
1533{
1534 u32 data2, line;
1535 u32 desc, time, count, base, data1;
1536 u32 blink1, blink2, ilink1, ilink2;
1537 u32 pc, hcmd;
1538
1539 if (priv->ucode_type == UCODE_INIT) {
1540 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1541 } else {
1542 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1543 }
1544
1545 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1546 IWL_ERR(priv,
1547 "Not valid error log pointer 0x%08X for %s uCode\n",
1548 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1549 return;
1550 }
1551
1552 count = iwl_legacy_read_targ_mem(priv, base);
1553
1554 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1555 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1556 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1557 priv->status, count);
1558 }
1559
1560 desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
1561 priv->isr_stats.err_code = desc;
1562 pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
1563 blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
1564 blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
1565 ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
1566 ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
1567 data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
1568 data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
1569 line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
1570 time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
1571 hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
1572
1573 trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
1574 time, data1, data2, line,
1575 blink1, blink2, ilink1, ilink2);
1576
1577 IWL_ERR(priv, "Desc Time "
1578 "data1 data2 line\n");
1579 IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
1580 iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
1581 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
1582 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
1583 pc, blink1, blink2, ilink1, ilink2, hcmd);
1584}
1585
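/*
 * Tell the uCode at which temperature (hw_params.ct_kill_threshold) it
 * should shut the radio down on its own (CT-kill), via
 * REPLY_CT_KILL_CONFIG_CMD.
 */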
1586static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1587{
1588 struct iwl_ct_kill_config cmd;
1589 unsigned long flags;
1590 int ret = 0;
1591
1592 spin_lock_irqsave(&priv->lock, flags);
1593 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1594 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1595 spin_unlock_irqrestore(&priv->lock, flags);
1596
1597 cmd.critical_temperature_R =
1598 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1599
1600 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1601 sizeof(cmd), &cmd);
1602 if (ret)
1603 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1604 else
1605 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1606 "succeeded, "
1607 "critical temperature is %d\n",
1608 priv->hw_params.ct_kill_threshold);
1609}
1610
1611static const s8 default_queue_to_tx_fifo[] = {
1612 IWL_TX_FIFO_VO,
1613 IWL_TX_FIFO_VI,
1614 IWL_TX_FIFO_BE,
1615 IWL_TX_FIFO_BK,
1616 IWL49_CMD_FIFO_NUM,
1617 IWL_TX_FIFO_UNUSED,
1618 IWL_TX_FIFO_UNUSED,
1619};
1620
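/*
 * Post-ALIVE Tx scheduler setup: clear the scheduler's context area in
 * SRAM, point it at the Tx byte-count tables, enable the Tx DMA
 * channels, initialize each Tx queue's pointers and window sizes, and
 * map every Tx/command queue to its FIFO per default_queue_to_tx_fifo[].
 */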
1621static int iwl4965_alive_notify(struct iwl_priv *priv)
1622{
1623 u32 a;
1624 unsigned long flags;
1625 int i, chan;
1626 u32 reg_val;
1627
1628 spin_lock_irqsave(&priv->lock, flags);
1629
1630 /* Clear 4965's internal Tx Scheduler data base */
1631 priv->scd_base_addr = iwl_legacy_read_prph(priv,
1632 IWL49_SCD_SRAM_BASE_ADDR);
1633 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
1634 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1635 iwl_legacy_write_targ_mem(priv, a, 0);
1636 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
1637 iwl_legacy_write_targ_mem(priv, a, 0);
1638 for (; a < priv->scd_base_addr +
1639 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
1640 iwl_legacy_write_targ_mem(priv, a, 0);
1641
1642	/* Tell 4965 where to find Tx byte count tables */
1643 iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
1644 priv->scd_bc_tbls.dma >> 10);
1645
1646 /* Enable DMA channel */
1647 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
1648 iwl_legacy_write_direct32(priv,
1649 FH_TCSR_CHNL_TX_CONFIG_REG(chan),
1650 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1651 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1652
1653 /* Update FH chicken bits */
1654 reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
1655 iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
1656 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1657
1658 /* Disable chain mode for all queues */
1659 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
1660
1661 /* Initialize each Tx queue (including the command queue) */
1662 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
1663
1664 /* TFD circular buffer read/write indexes */
1665 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
1666 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1667
1668 /* Max Tx Window size for Scheduler-ACK mode */
1669 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
1670 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
1671 (SCD_WIN_SIZE <<
1672 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1673 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1674
1675 /* Frame limit */
1676 iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
1677 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
1678 sizeof(u32),
1679 (SCD_FRAME_LIMIT <<
1680 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1681 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1682
1683 }
1684 iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
1685 (1 << priv->hw_params.max_txq_num) - 1);
1686
1687 /* Activate all Tx DMA/FIFO channels */
1688 iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
1689
1690 iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
1691
1692	/* make sure all queues are not stopped */
1693 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
1694 for (i = 0; i < 4; i++)
1695 atomic_set(&priv->queue_stop_count[i], 0);
1696
1697	/* reset to 0 to enable all queues first */
1698 priv->txq_ctx_active_msk = 0;
1699 /* Map each Tx/cmd queue to its corresponding fifo */
1700 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
1701
1702 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
1703 int ac = default_queue_to_tx_fifo[i];
1704
1705 iwl_txq_ctx_activate(priv, i);
1706
1707 if (ac == IWL_TX_FIFO_UNUSED)
1708 continue;
1709
1710 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
1711 }
1712
1713 spin_unlock_irqrestore(&priv->lock, flags);
1714
1715 return 0;
1716}
1717
1718/**
1719 * iwl4965_alive_start - called after REPLY_ALIVE notification received
1720 * from protocol/runtime uCode (initialization uCode's
1721 * Alive gets handled by iwl_init_alive_start()).
1722 */
1723static void iwl4965_alive_start(struct iwl_priv *priv)
1724{
1725 int ret = 0;
1726 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1727
1728 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
1729
1730 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
1731 /* We had an error bringing up the hardware, so take it
1732 * all the way back down so we can try again */
1733 IWL_DEBUG_INFO(priv, "Alive failed.\n");
1734 goto restart;
1735 }
1736
1737	/* The "initialize" uCode has loaded the runtime uCode ... verify the
1738	 * instruction image.  This is a paranoid check, because we would not
1739	 * have gotten the "runtime" alive if the code weren't properly loaded. */
1740 if (iwl4965_verify_ucode(priv)) {
1741 /* Runtime instruction load was bad;
1742 * take it all the way back down so we can try again */
1743 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
1744 goto restart;
1745 }
1746
1747 ret = iwl4965_alive_notify(priv);
1748 if (ret) {
1749 IWL_WARN(priv,
1750 "Could not complete ALIVE transition [ntf]: %d\n", ret);
1751 goto restart;
1752 }
1753
1754
1755 /* After the ALIVE response, we can send host commands to the uCode */
1756 set_bit(STATUS_ALIVE, &priv->status);
1757
1758 /* Enable watchdog to monitor the driver tx queues */
1759 iwl_legacy_setup_watchdog(priv);
1760
1761 if (iwl_legacy_is_rfkill(priv))
1762 return;
1763
1764 ieee80211_wake_queues(priv->hw);
1765
1766 priv->active_rate = IWL_RATES_MASK;
1767
1768 if (iwl_legacy_is_associated_ctx(ctx)) {
1769 struct iwl_legacy_rxon_cmd *active_rxon =
1770 (struct iwl_legacy_rxon_cmd *)&ctx->active;
1771 /* apply any changes in staging */
1772 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
1773 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1774 } else {
1775 struct iwl_rxon_context *tmp;
1776 /* Initialize our rx_config data */
1777 for_each_context(priv, tmp)
1778 iwl_legacy_connection_init_rx_config(priv, tmp);
1779
1780 if (priv->cfg->ops->hcmd->set_rxon_chain)
1781 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1782 }
1783
1784 /* Configure bluetooth coexistence if enabled */
1785 iwl_legacy_send_bt_config(priv);
1786
1787 iwl4965_reset_run_time_calib(priv);
1788
1789 set_bit(STATUS_READY, &priv->status);
1790
1791 /* Configure the adapter for unassociated operation */
1792 iwl_legacy_commit_rxon(priv, ctx);
1793
1794 /* At this point, the NIC is initialized and operational */
1795 iwl4965_rf_kill_ct_config(priv);
1796
1797 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
1798 wake_up(&priv->wait_command_queue);
1799
1800 iwl_legacy_power_update_mode(priv, true);
1801 IWL_DEBUG_INFO(priv, "Updated power mode\n");
1802
1803 return;
1804
1805 restart:
1806 queue_work(priv->workqueue, &priv->restart);
1807}
1808
1809static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
1810
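/*
 * Tear the device down: cancel any scan, stop the Tx watchdog, drop all
 * station table entries, disable interrupts, stop Tx/Rx DMA and put the
 * adapter into low-power state via iwl_legacy_apm_stop().  Called with
 * priv->mutex held.
 */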
1811static void __iwl4965_down(struct iwl_priv *priv)
1812{
1813 unsigned long flags;
1814 int exit_pending;
1815
1816 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
1817
1818 iwl_legacy_scan_cancel_timeout(priv, 200);
1819
1820 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
1821
1822	/* Stop the TX queue watchdog.  The STATUS_EXIT_PENDING bit must be set
1823	 * to prevent the timer from being re-armed. */
1824 del_timer_sync(&priv->watchdog);
1825
1826 iwl_legacy_clear_ucode_stations(priv, NULL);
1827 iwl_legacy_dealloc_bcast_stations(priv);
1828 iwl_legacy_clear_driver_stations(priv);
1829
1830 /* Unblock any waiting calls */
1831 wake_up_all(&priv->wait_command_queue);
1832
1833 /* Wipe out the EXIT_PENDING status bit if we are not actually
1834 * exiting the module */
1835 if (!exit_pending)
1836 clear_bit(STATUS_EXIT_PENDING, &priv->status);
1837
1838 /* stop and reset the on-board processor */
1839 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1840
1841 /* tell the device to stop sending interrupts */
1842 spin_lock_irqsave(&priv->lock, flags);
1843 iwl_legacy_disable_interrupts(priv);
1844 spin_unlock_irqrestore(&priv->lock, flags);
1845 iwl4965_synchronize_irq(priv);
1846
1847 if (priv->mac80211_registered)
1848 ieee80211_stop_queues(priv->hw);
1849
1850 /* If we have not previously called iwl_init() then
1851 * clear all bits but the RF Kill bit and return */
1852 if (!iwl_legacy_is_init(priv)) {
1853 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
1854 STATUS_RF_KILL_HW |
1855 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
1856 STATUS_GEO_CONFIGURED |
1857 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
1858 STATUS_EXIT_PENDING;
1859 goto exit;
1860 }
1861
1862 /* ...otherwise clear out all the status bits but the RF Kill
1863 * bit and continue taking the NIC down. */
1864 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
1865 STATUS_RF_KILL_HW |
1866 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
1867 STATUS_GEO_CONFIGURED |
1868 test_bit(STATUS_FW_ERROR, &priv->status) <<
1869 STATUS_FW_ERROR |
1870 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
1871 STATUS_EXIT_PENDING;
1872
1873 iwl4965_txq_ctx_stop(priv);
1874 iwl4965_rxq_stop(priv);
1875
1876 /* Power-down device's busmaster DMA clocks */
1877 iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
1878 udelay(5);
1879
1880 /* Make sure (redundant) we've released our request to stay awake */
1881 iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
1882 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1883
1884 /* Stop the device, and put it in low power state */
1885 iwl_legacy_apm_stop(priv);
1886
1887 exit:
1888 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
1889
1890 dev_kfree_skb(priv->beacon_skb);
1891 priv->beacon_skb = NULL;
1892
1893 /* clear out any free frames */
1894 iwl4965_clear_free_frames(priv);
1895}
1896
1897static void iwl4965_down(struct iwl_priv *priv)
1898{
1899 mutex_lock(&priv->mutex);
1900 __iwl4965_down(priv);
1901 mutex_unlock(&priv->mutex);
1902
1903 iwl4965_cancel_deferred_work(priv);
1904}
1905
1906#define HW_READY_TIMEOUT (50)
1907
1908static int iwl4965_set_hw_ready(struct iwl_priv *priv)
1909{
1910 int ret = 0;
1911
1912 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1913 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1914
1915 /* See if we got it */
1916 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
1917 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1918 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1919 HW_READY_TIMEOUT);
1920 if (ret != -ETIMEDOUT)
1921 priv->hw_ready = true;
1922 else
1923 priv->hw_ready = false;
1924
1925 IWL_DEBUG_INFO(priv, "hardware %s\n",
1926 (priv->hw_ready == 1) ? "ready" : "not ready");
1927 return ret;
1928}
1929
1930static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
1931{
1932 int ret = 0;
1933
1934 IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
1935
1936 ret = iwl4965_set_hw_ready(priv);
1937 if (priv->hw_ready)
1938 return ret;
1939
1940 /* If HW is not ready, prepare the conditions to check again */
1941 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1942 CSR_HW_IF_CONFIG_REG_PREPARE);
1943
1944 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
1945 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
1946 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
1947
1948 /* HW should be ready by now, check again. */
1949 if (ret != -ETIMEDOUT)
1950 iwl4965_set_hw_ready(priv);
1951
1952 return ret;
1953}
1954
1955#define MAX_HW_RESTARTS 5
1956
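/*
 * Bring the NIC up: allocate the broadcast stations, make sure the
 * hardware is ready and not RF-killed, init the NIC, then try up to
 * MAX_HW_RESTARTS times to load the bootstrap uCode and start the
 * card; the "initialize" image then loads the runtime uCode.
 */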
1957static int __iwl4965_up(struct iwl_priv *priv)
1958{
1959 struct iwl_rxon_context *ctx;
1960 int i;
1961 int ret;
1962
1963 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1964 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
1965 return -EIO;
1966 }
1967
1968 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
1969 IWL_ERR(priv, "ucode not available for device bringup\n");
1970 return -EIO;
1971 }
1972
1973 for_each_context(priv, ctx) {
1974 ret = iwl4965_alloc_bcast_station(priv, ctx);
1975 if (ret) {
1976 iwl_legacy_dealloc_bcast_stations(priv);
1977 return ret;
1978 }
1979 }
1980
1981 iwl4965_prepare_card_hw(priv);
1982
1983 if (!priv->hw_ready) {
1984 IWL_WARN(priv, "Exit HW not ready\n");
1985 return -EIO;
1986 }
1987
1988 /* If platform's RF_KILL switch is NOT set to KILL */
1989 if (iwl_read32(priv,
1990 CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
1991 clear_bit(STATUS_RF_KILL_HW, &priv->status);
1992 else
1993 set_bit(STATUS_RF_KILL_HW, &priv->status);
1994
1995 if (iwl_legacy_is_rfkill(priv)) {
1996 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
1997
1998 iwl_legacy_enable_interrupts(priv);
1999 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2000 return 0;
2001 }
2002
2003 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2004
2005 /* must be initialised before iwl_hw_nic_init */
2006 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
2007
2008 ret = iwl4965_hw_nic_init(priv);
2009 if (ret) {
2010 IWL_ERR(priv, "Unable to init nic\n");
2011 return ret;
2012 }
2013
2014 /* make sure rfkill handshake bits are cleared */
2015 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2016 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2017 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2018
2019 /* clear (again), then enable host interrupts */
2020 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2021 iwl_legacy_enable_interrupts(priv);
2022
2023 /* really make sure rfkill handshake bits are cleared */
2024 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2025 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2026
2027 /* Copy original ucode data image from disk into backup cache.
2028 * This will be used to initialize the on-board processor's
2029 * data SRAM for a clean start when the runtime program first loads. */
2030 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2031 priv->ucode_data.len);
2032
2033 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2034
2035 /* load bootstrap state machine,
2036 * load bootstrap program into processor's memory,
2037 * prepare to load the "initialize" uCode */
2038 ret = priv->cfg->ops->lib->load_ucode(priv);
2039
2040 if (ret) {
2041 IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
2042 ret);
2043 continue;
2044 }
2045
2046 /* start card; "initialize" will load runtime ucode */
2047 iwl4965_nic_start(priv);
2048
2049 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2050
2051 return 0;
2052 }
2053
2054 set_bit(STATUS_EXIT_PENDING, &priv->status);
2055 __iwl4965_down(priv);
2056 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2057
2058 /* tried to restart and config the device for as long as our
2059 * patience could withstand */
2060 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2061 return -EIO;
2062}
2063
2064
2065/*****************************************************************************
2066 *
2067 * Workqueue callbacks
2068 *
2069 *****************************************************************************/
2070
2071static void iwl4965_bg_init_alive_start(struct work_struct *data)
2072{
2073 struct iwl_priv *priv =
2074 container_of(data, struct iwl_priv, init_alive_start.work);
2075
2076 mutex_lock(&priv->mutex);
2077 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2078 goto out;
2079
2080 priv->cfg->ops->lib->init_alive_start(priv);
2081out:
2082 mutex_unlock(&priv->mutex);
2083}
2084
2085static void iwl4965_bg_alive_start(struct work_struct *data)
2086{
2087 struct iwl_priv *priv =
2088 container_of(data, struct iwl_priv, alive_start.work);
2089
2090 mutex_lock(&priv->mutex);
2091 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2092 goto out;
2093
2094 iwl4965_alive_start(priv);
2095out:
2096 mutex_unlock(&priv->mutex);
2097}
2098
2099static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
2100{
2101 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2102 run_time_calib_work);
2103
2104 mutex_lock(&priv->mutex);
2105
2106 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2107 test_bit(STATUS_SCANNING, &priv->status)) {
2108 mutex_unlock(&priv->mutex);
2109 return;
2110 }
2111
2112 if (priv->start_calib) {
2113 iwl4965_chain_noise_calibration(priv,
2114 (void *)&priv->_4965.statistics);
2115 iwl4965_sensitivity_calibration(priv,
2116 (void *)&priv->_4965.statistics);
2117 }
2118
2119 mutex_unlock(&priv->mutex);
2120}
2121
2122static void iwl4965_bg_restart(struct work_struct *data)
2123{
2124 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2125
2126 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2127 return;
2128
2129 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2130 struct iwl_rxon_context *ctx;
2131
2132 mutex_lock(&priv->mutex);
2133 for_each_context(priv, ctx)
2134 ctx->vif = NULL;
2135 priv->is_open = 0;
2136
2137 __iwl4965_down(priv);
2138
2139 mutex_unlock(&priv->mutex);
2140 iwl4965_cancel_deferred_work(priv);
2141 ieee80211_restart_hw(priv->hw);
2142 } else {
2143 iwl4965_down(priv);
2144
2145 mutex_lock(&priv->mutex);
2146 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2147 mutex_unlock(&priv->mutex);
2148 return;
2149 }
2150
2151 __iwl4965_up(priv);
2152 mutex_unlock(&priv->mutex);
2153 }
2154}
2155
2156static void iwl4965_bg_rx_replenish(struct work_struct *data)
2157{
2158 struct iwl_priv *priv =
2159 container_of(data, struct iwl_priv, rx_replenish);
2160
2161 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2162 return;
2163
2164 mutex_lock(&priv->mutex);
2165 iwl4965_rx_replenish(priv);
2166 mutex_unlock(&priv->mutex);
2167}
2168
2169/*****************************************************************************
2170 *
2171 * mac80211 entry point functions
2172 *
2173 *****************************************************************************/
2174
2175#define UCODE_READY_TIMEOUT (4 * HZ)
2176
2177/*
2178 * Not a mac80211 entry point function, but it fits in with all the
2179 * other mac80211 functions grouped here.
2180 */
2181static int iwl4965_mac_setup_register(struct iwl_priv *priv,
2182 u32 max_probe_length)
2183{
2184 int ret;
2185 struct ieee80211_hw *hw = priv->hw;
2186 struct iwl_rxon_context *ctx;
2187
2188 hw->rate_control_algorithm = "iwl-4965-rs";
2189
2190 /* Tell mac80211 our characteristics */
2191 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2192 IEEE80211_HW_AMPDU_AGGREGATION |
2193 IEEE80211_HW_NEED_DTIM_PERIOD |
2194 IEEE80211_HW_SPECTRUM_MGMT |
2195 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2196
2197 if (priv->cfg->sku & IWL_SKU_N)
2198 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2199 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2200
2201 hw->sta_data_size = sizeof(struct iwl_station_priv);
2202 hw->vif_data_size = sizeof(struct iwl_vif_priv);
2203
2204 for_each_context(priv, ctx) {
2205 hw->wiphy->interface_modes |= ctx->interface_modes;
2206 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
2207 }
2208
2209 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
2210 WIPHY_FLAG_DISABLE_BEACON_HINTS;
2211
2212 /*
2213 * For now, disable PS by default because it affects
2214 * RX performance significantly.
2215 */
2216 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2217
2218 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2219 /* we create the 802.11 header and a zero-length SSID element */
2220 hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
2221
2222 /* Default value; 4 EDCA QOS priorities */
2223 hw->queues = 4;
2224
2225 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2226
2227 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2228 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2229 &priv->bands[IEEE80211_BAND_2GHZ];
2230 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2231 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2232 &priv->bands[IEEE80211_BAND_5GHZ];
2233
2234 iwl_legacy_leds_init(priv);
2235
2236 ret = ieee80211_register_hw(priv->hw);
2237 if (ret) {
2238 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2239 return ret;
2240 }
2241 priv->mac80211_registered = 1;
2242
2243 return 0;
2244}
2245
2246
2247int iwl4965_mac_start(struct ieee80211_hw *hw)
2248{
2249 struct iwl_priv *priv = hw->priv;
2250 int ret;
2251
2252 IWL_DEBUG_MAC80211(priv, "enter\n");
2253
2254 /* we should be verifying the device is ready to be opened */
2255 mutex_lock(&priv->mutex);
2256 ret = __iwl4965_up(priv);
2257 mutex_unlock(&priv->mutex);
2258
2259 if (ret)
2260 return ret;
2261
2262 if (iwl_legacy_is_rfkill(priv))
2263 goto out;
2264
2265 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2266
2267 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
2268 * mac80211 will not be run successfully. */
2269 ret = wait_event_timeout(priv->wait_command_queue,
2270 test_bit(STATUS_READY, &priv->status),
2271 UCODE_READY_TIMEOUT);
2272 if (!ret) {
2273 if (!test_bit(STATUS_READY, &priv->status)) {
2274 IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
2275 jiffies_to_msecs(UCODE_READY_TIMEOUT));
2276 return -ETIMEDOUT;
2277 }
2278 }
2279
2280 iwl4965_led_enable(priv);
2281
2282out:
2283 priv->is_open = 1;
2284 IWL_DEBUG_MAC80211(priv, "leave\n");
2285 return 0;
2286}
2287
2288void iwl4965_mac_stop(struct ieee80211_hw *hw)
2289{
2290 struct iwl_priv *priv = hw->priv;
2291
2292 IWL_DEBUG_MAC80211(priv, "enter\n");
2293
2294 if (!priv->is_open)
2295 return;
2296
2297 priv->is_open = 0;
2298
2299 iwl4965_down(priv);
2300
2301 flush_workqueue(priv->workqueue);
2302
2303	/* User space software may expect to get rfkill changes
2304	 * even if the interface is down */
2305 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2306 iwl_legacy_enable_rfkill_int(priv);
2307
2308 IWL_DEBUG_MAC80211(priv, "leave\n");
2309}
2310
2311void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2312{
2313 struct iwl_priv *priv = hw->priv;
2314
2315 IWL_DEBUG_MACDUMP(priv, "enter\n");
2316
2317 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2318 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2319
2320 if (iwl4965_tx_skb(priv, skb))
2321 dev_kfree_skb_any(skb);
2322
2323 IWL_DEBUG_MACDUMP(priv, "leave\n");
2324}
2325
2326void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
2327 struct ieee80211_vif *vif,
2328 struct ieee80211_key_conf *keyconf,
2329 struct ieee80211_sta *sta,
2330 u32 iv32, u16 *phase1key)
2331{
2332 struct iwl_priv *priv = hw->priv;
2333 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2334
2335 IWL_DEBUG_MAC80211(priv, "enter\n");
2336
2337 iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
2338 iv32, phase1key);
2339
2340 IWL_DEBUG_MAC80211(priv, "leave\n");
2341}
2342
2343int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2344 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2345 struct ieee80211_key_conf *key)
2346{
2347 struct iwl_priv *priv = hw->priv;
2348 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2349 struct iwl_rxon_context *ctx = vif_priv->ctx;
2350 int ret;
2351 u8 sta_id;
2352 bool is_default_wep_key = false;
2353
2354 IWL_DEBUG_MAC80211(priv, "enter\n");
2355
2356 if (priv->cfg->mod_params->sw_crypto) {
2357 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2358 return -EOPNOTSUPP;
2359 }
2360
2361 sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
2362 if (sta_id == IWL_INVALID_STATION)
2363 return -EINVAL;
2364
2365 mutex_lock(&priv->mutex);
2366 iwl_legacy_scan_cancel_timeout(priv, 100);
2367
2368 /*
2369 * If we are getting WEP group key and we didn't receive any key mapping
2370 * so far, we are in legacy wep mode (group key only), otherwise we are
2371 * in 1X mode.
2372 * In legacy wep mode, we use another host command to the uCode.
2373 */
2374 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2375 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2376 !sta) {
2377 if (cmd == SET_KEY)
2378 is_default_wep_key = !ctx->key_mapping_keys;
2379 else
2380 is_default_wep_key =
2381 (key->hw_key_idx == HW_KEY_DEFAULT);
2382 }
2383
2384 switch (cmd) {
2385 case SET_KEY:
2386 if (is_default_wep_key)
2387 ret = iwl4965_set_default_wep_key(priv,
2388 vif_priv->ctx, key);
2389 else
2390 ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
2391 key, sta_id);
2392
2393 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2394 break;
2395 case DISABLE_KEY:
2396 if (is_default_wep_key)
2397 ret = iwl4965_remove_default_wep_key(priv, ctx, key);
2398 else
2399 ret = iwl4965_remove_dynamic_key(priv, ctx,
2400 key, sta_id);
2401
2402 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2403 break;
2404 default:
2405 ret = -EINVAL;
2406 }
2407
2408 mutex_unlock(&priv->mutex);
2409 IWL_DEBUG_MAC80211(priv, "leave\n");
2410
2411 return ret;
2412}
2413
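/*
 * mac80211 A-MPDU hook: start/stop Rx and Tx aggregation sessions for a
 * given station/TID.  Only available on 11n-capable SKUs; otherwise
 * -EACCES is returned.
 */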
2414int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
2415 struct ieee80211_vif *vif,
2416 enum ieee80211_ampdu_mlme_action action,
2417 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2418 u8 buf_size)
2419{
2420 struct iwl_priv *priv = hw->priv;
2421 int ret = -EINVAL;
2422
2423 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2424 sta->addr, tid);
2425
2426 if (!(priv->cfg->sku & IWL_SKU_N))
2427 return -EACCES;
2428
2429 mutex_lock(&priv->mutex);
2430
2431 switch (action) {
2432 case IEEE80211_AMPDU_RX_START:
2433 IWL_DEBUG_HT(priv, "start Rx\n");
2434 ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
2435 break;
2436 case IEEE80211_AMPDU_RX_STOP:
2437 IWL_DEBUG_HT(priv, "stop Rx\n");
2438 ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
2439 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2440 ret = 0;
2441 break;
2442 case IEEE80211_AMPDU_TX_START:
2443 IWL_DEBUG_HT(priv, "start Tx\n");
2444 ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
2445 break;
2446 case IEEE80211_AMPDU_TX_STOP:
2447 IWL_DEBUG_HT(priv, "stop Tx\n");
2448 ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
2449 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2450 ret = 0;
2451 break;
2452 case IEEE80211_AMPDU_TX_OPERATIONAL:
2453 ret = 0;
2454 break;
2455 }
2456 mutex_unlock(&priv->mutex);
2457
2458 return ret;
2459}
2460
2461int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
2462 struct ieee80211_vif *vif,
2463 struct ieee80211_sta *sta)
2464{
2465 struct iwl_priv *priv = hw->priv;
2466 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2467 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2468 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
2469 int ret;
2470 u8 sta_id;
2471
2472 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
2473 sta->addr);
2474 mutex_lock(&priv->mutex);
2475 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
2476 sta->addr);
2477 sta_priv->common.sta_id = IWL_INVALID_STATION;
2478
2479 atomic_set(&sta_priv->pending_frames, 0);
2480
2481 ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
2482 is_ap, sta, &sta_id);
2483 if (ret) {
2484 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
2485 sta->addr, ret);
2486 /* Should we return success if return code is EEXIST ? */
2487 mutex_unlock(&priv->mutex);
2488 return ret;
2489 }
2490
2491 sta_priv->common.sta_id = sta_id;
2492
2493 /* Initialize rate scaling */
2494 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
2495 sta->addr);
2496 iwl4965_rs_rate_init(priv, sta, sta_id);
2497 mutex_unlock(&priv->mutex);
2498
2499 return 0;
2500}
2501
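/*
 * Handle a channel switch announced by the AP: validate the target
 * channel, copy the new channel and HT40 settings into the staging
 * RXON, then ask the device to perform the switch; if that fails the
 * switch is reported back to mac80211 as not completed.
 */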
2502void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
2503 struct ieee80211_channel_switch *ch_switch)
2504{
2505 struct iwl_priv *priv = hw->priv;
2506 const struct iwl_channel_info *ch_info;
2507 struct ieee80211_conf *conf = &hw->conf;
2508 struct ieee80211_channel *channel = ch_switch->channel;
2509 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2510
2511 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2512 u16 ch;
2513
2514 IWL_DEBUG_MAC80211(priv, "enter\n");
2515
2516 mutex_lock(&priv->mutex);
2517
2518 if (iwl_legacy_is_rfkill(priv))
2519 goto out;
2520
2521 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2522 test_bit(STATUS_SCANNING, &priv->status) ||
2523 test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
2524 goto out;
2525
2526 if (!iwl_legacy_is_associated_ctx(ctx))
2527 goto out;
2528
2529 if (!priv->cfg->ops->lib->set_channel_switch)
2530 goto out;
2531
2532 ch = channel->hw_value;
2533 if (le16_to_cpu(ctx->active.channel) == ch)
2534 goto out;
2535
2536 ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
2537 if (!iwl_legacy_is_channel_valid(ch_info)) {
2538 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
2539 goto out;
2540 }
2541
2542 spin_lock_irq(&priv->lock);
2543
2544 priv->current_ht_config.smps = conf->smps_mode;
2545
2546 /* Configure HT40 channels */
2547 ctx->ht.enabled = conf_is_ht(conf);
2548 if (ctx->ht.enabled) {
2549 if (conf_is_ht40_minus(conf)) {
2550 ctx->ht.extension_chan_offset =
2551 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2552 ctx->ht.is_40mhz = true;
2553 } else if (conf_is_ht40_plus(conf)) {
2554 ctx->ht.extension_chan_offset =
2555 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2556 ctx->ht.is_40mhz = true;
2557 } else {
2558 ctx->ht.extension_chan_offset =
2559 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2560 ctx->ht.is_40mhz = false;
2561 }
2562 } else
2563 ctx->ht.is_40mhz = false;
2564
2565 if ((le16_to_cpu(ctx->staging.channel) != ch))
2566 ctx->staging.flags = 0;
2567
2568 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2569 iwl_legacy_set_rxon_ht(priv, ht_conf);
2570 iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
2571
2572 spin_unlock_irq(&priv->lock);
2573
2574 iwl_legacy_set_rate(priv);
2575 /*
2576 * at this point, staging_rxon has the
2577 * configuration for channel switch
2578 */
2579 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
2580 priv->switch_channel = cpu_to_le16(ch);
2581 if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
2582 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
2583 priv->switch_channel = 0;
2584 ieee80211_chswitch_done(ctx->vif, false);
2585 }
2586
2587out:
2588 mutex_unlock(&priv->mutex);
2589 IWL_DEBUG_MAC80211(priv, "leave\n");
2590}
2591
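/*
 * mac80211 Rx filter hook: translate the FIF_* flags into RXON filter
 * bits and apply them to every context's staging RXON.  The change is
 * not committed to the device here; it goes out with the next RXON
 * commit.  Multicast is always received since per-address multicast
 * filters are not programmed into the device.
 */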
2592void iwl4965_configure_filter(struct ieee80211_hw *hw,
2593 unsigned int changed_flags,
2594 unsigned int *total_flags,
2595 u64 multicast)
2596{
2597 struct iwl_priv *priv = hw->priv;
2598 __le32 filter_or = 0, filter_nand = 0;
2599 struct iwl_rxon_context *ctx;
2600
2601#define CHK(test, flag) do { \
2602 if (*total_flags & (test)) \
2603 filter_or |= (flag); \
2604 else \
2605 filter_nand |= (flag); \
2606 } while (0)
2607
2608 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
2609 changed_flags, *total_flags);
2610
2611 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
2612 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
2613 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
2614 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
2615
2616#undef CHK
2617
2618 mutex_lock(&priv->mutex);
2619
2620 for_each_context(priv, ctx) {
2621 ctx->staging.filter_flags &= ~filter_nand;
2622 ctx->staging.filter_flags |= filter_or;
2623
2624 /*
2625 * Not committing directly because hardware can perform a scan,
2626 * but we'll eventually commit the filter flags change anyway.
2627 */
2628 }
2629
2630 mutex_unlock(&priv->mutex);
2631
2632 /*
2633 * Receiving all multicast frames is always enabled by the
2634 * default flags setup in iwl_legacy_connection_init_rx_config()
2635 * since we currently do not support programming multicast
2636 * filters into the device.
2637 */
2638 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
2639 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
2640}
2641
2642/*****************************************************************************
2643 *
2644 * driver setup and teardown
2645 *
2646 *****************************************************************************/
2647
2648static void iwl4965_bg_txpower_work(struct work_struct *work)
2649{
2650 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2651 txpower_work);
2652
2653 mutex_lock(&priv->mutex);
2654
2655 /* If a scan happened to start before we got here
2656 * then just return; the statistics notification will
2657 * kick off another scheduled work to compensate for
2658 * any temperature delta we missed here. */
2659 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2660 test_bit(STATUS_SCANNING, &priv->status))
2661 goto out;
2662
2663	/* Regardless of whether we are associated, we must reconfigure
2664	 * the TX power since frames can be sent on non-radar channels
2665	 * while not associated. */
2666 priv->cfg->ops->lib->send_tx_power(priv);
2667
2668 /* Update last_temperature to keep is_calib_needed from running
2669 * when it isn't needed... */
2670 priv->last_temperature = priv->temperature;
2671out:
2672 mutex_unlock(&priv->mutex);
2673}
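
/*
 * Illustrative sketch (the actual call sites live in other iwlegacy
 * files, so treat this as an assumption): the statistics/temperature
 * handling is expected to re-queue this work on the driver workqueue
 * along the lines of
 *
 *	queue_work(priv->workqueue, &priv->txpower_work);
 *
 * which is how a temperature delta skipped above is picked up later.
 */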
2674
2675static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
2676{
2677 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
2678
2679 init_waitqueue_head(&priv->wait_command_queue);
2680
2681 INIT_WORK(&priv->restart, iwl4965_bg_restart);
2682 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
2683 INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
2684 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
2685 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
2686
2687 iwl_legacy_setup_scan_deferred_work(priv);
2688
2689 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
2690
2691 init_timer(&priv->statistics_periodic);
2692 priv->statistics_periodic.data = (unsigned long)priv;
2693 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
2694
2695 init_timer(&priv->watchdog);
2696 priv->watchdog.data = (unsigned long)priv;
2697 priv->watchdog.function = iwl_legacy_bg_watchdog;
2698
2699 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
2700 iwl4965_irq_tasklet, (unsigned long)priv);
2701}
2702
2703static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
2704{
2705 cancel_work_sync(&priv->txpower_work);
2706 cancel_delayed_work_sync(&priv->init_alive_start);
2707 cancel_delayed_work(&priv->alive_start);
2708 cancel_work_sync(&priv->run_time_calib_work);
2709
2710 iwl_legacy_cancel_scan_deferred_work(priv);
2711
2712 del_timer_sync(&priv->statistics_periodic);
2713}
2714
2715static void iwl4965_init_hw_rates(struct iwl_priv *priv,
2716 struct ieee80211_rate *rates)
2717{
2718 int i;
2719
2720 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
2721 rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
2722 rates[i].hw_value = i; /* Rate scaling will work on indexes */
2723 rates[i].hw_value_short = i;
2724 rates[i].flags = 0;
2725 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
2726 /*
2727 * If CCK != 1M then set short preamble rate flag.
2728 */
2729 rates[i].flags |=
2730 (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
2731 0 : IEEE80211_RATE_SHORT_PREAMBLE;
2732 }
2733 }
2734}
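
/*
 * Worked example (illustration only): iwlegacy_rates[].ieee holds the
 * 802.11 rate value in 500 kbps units, while mac80211's
 * ieee80211_rate.bitrate is in 100 kbps units, hence the "* 5" above.
 * For the 54 Mbps entry, .ieee is 108 and .bitrate becomes 540.
 */
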
2735/*
2736 * Acquire priv->lock before calling this function!
2737 */
2738void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
2739{
2740 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
2741 (index & 0xff) | (txq_id << 8));
2742 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
2743}
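
/*
 * Usage sketch only (not driver code), following the locking note
 * above: callers are expected to hold priv->lock, e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	iwl4965_set_wr_ptrs(priv, txq_id, 0);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 */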
2744
2745void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
2746 struct iwl_tx_queue *txq,
2747 int tx_fifo_id, int scd_retry)
2748{
2749 int txq_id = txq->q.id;
2750
2751 /* Find out whether to activate Tx queue */
2752 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
2753
2754 /* Set up and activate */
2755 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
2756 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2757 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
2758 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
2759 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
2760 IWL49_SCD_QUEUE_STTS_REG_MSK);
2761
2762 txq->sched_retry = scd_retry;
2763
2764 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
2765 active ? "Activate" : "Deactivate",
2766 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
2767}
2768
2769
2770static int iwl4965_init_drv(struct iwl_priv *priv)
2771{
2772 int ret;
2773
2774 spin_lock_init(&priv->sta_lock);
2775 spin_lock_init(&priv->hcmd_lock);
2776
2777 INIT_LIST_HEAD(&priv->free_frames);
2778
2779 mutex_init(&priv->mutex);
2780
2781 priv->ieee_channels = NULL;
2782 priv->ieee_rates = NULL;
2783 priv->band = IEEE80211_BAND_2GHZ;
2784
2785 priv->iw_mode = NL80211_IFTYPE_STATION;
2786 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
2787 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
2788
2789 /* initialize force reset */
2790 priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
2791
2792 /* Choose which receivers/antennas to use */
2793 if (priv->cfg->ops->hcmd->set_rxon_chain)
2794 priv->cfg->ops->hcmd->set_rxon_chain(priv,
2795 &priv->contexts[IWL_RXON_CTX_BSS]);
2796
2797 iwl_legacy_init_scan_params(priv);
2798
2799 ret = iwl_legacy_init_channel_map(priv);
2800 if (ret) {
2801 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
2802 goto err;
2803 }
2804
2805 ret = iwl_legacy_init_geos(priv);
2806 if (ret) {
2807 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
2808 goto err_free_channel_map;
2809 }
2810 iwl4965_init_hw_rates(priv, priv->ieee_rates);
2811
2812 return 0;
2813
2814err_free_channel_map:
2815 iwl_legacy_free_channel_map(priv);
2816err:
2817 return ret;
2818}
2819
2820static void iwl4965_uninit_drv(struct iwl_priv *priv)
2821{
2822 iwl4965_calib_free_results(priv);
2823 iwl_legacy_free_geos(priv);
2824 iwl_legacy_free_channel_map(priv);
2825 kfree(priv->scan_cmd);
2826}
2827
2828static void iwl4965_hw_detect(struct iwl_priv *priv)
2829{
2830 priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
2831 priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
2832 priv->rev_id = priv->pci_dev->revision;
2833 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
2834}
2835
2836static int iwl4965_set_hw_params(struct iwl_priv *priv)
2837{
2838 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2839 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2840 if (priv->cfg->mod_params->amsdu_size_8K)
2841 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
2842 else
2843 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
2844
2845 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
2846
2847 if (priv->cfg->mod_params->disable_11n)
2848 priv->cfg->sku &= ~IWL_SKU_N;
2849
2850 /* Device-specific setup */
2851 return priv->cfg->ops->lib->set_hw_params(priv);
2852}
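
/*
 * Side note (illustration, assuming 4 KiB pages and the usual 4 KiB /
 * 8 KiB RX buffer sizes): get_order(IWL_RX_BUF_SIZE_4K) evaluates to 0
 * (one page per RX buffer) and get_order(IWL_RX_BUF_SIZE_8K) to 1 (two
 * contiguous pages); that is all the amsdu_size_8K option changes here.
 */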
2853
2854static const u8 iwl4965_bss_ac_to_fifo[] = {
2855 IWL_TX_FIFO_VO,
2856 IWL_TX_FIFO_VI,
2857 IWL_TX_FIFO_BE,
2858 IWL_TX_FIFO_BK,
2859};
2860
2861static const u8 iwl4965_bss_ac_to_queue[] = {
2862 0, 1, 2, 3,
2863};
2864
2865static int
2866iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2867{
2868 int err = 0, i;
2869 struct iwl_priv *priv;
2870 struct ieee80211_hw *hw;
2871 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
2872 unsigned long flags;
2873 u16 pci_cmd;
2874
2875 /************************
2876 * 1. Allocating HW data
2877 ************************/
2878
2879 hw = iwl_legacy_alloc_all(cfg);
2880 if (!hw) {
2881 err = -ENOMEM;
2882 goto out;
2883 }
2884 priv = hw->priv;
2885 /* At this point both hw and priv are allocated. */
2886
2887 /*
2888	 * The default context is always valid;
2889 * more may be discovered when firmware
2890 * is loaded.
2891 */
2892 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
2893
2894 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
2895 priv->contexts[i].ctxid = i;
2896
2897 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
2898 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
2899 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
2900 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
2901 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
2902 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
2903 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
2904 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
2905 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
2906 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
2907 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
2908 BIT(NL80211_IFTYPE_ADHOC);
2909 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
2910 BIT(NL80211_IFTYPE_STATION);
2911 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
2912 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
2913 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
2914 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
2915
2916 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
2917
2918 SET_IEEE80211_DEV(hw, &pdev->dev);
2919
2920 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
2921 priv->cfg = cfg;
2922 priv->pci_dev = pdev;
2923 priv->inta_mask = CSR_INI_SET_MASK;
2924
2925 if (iwl_legacy_alloc_traffic_mem(priv))
2926 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
2927
2928 /**************************
2929 * 2. Initializing PCI bus
2930 **************************/
2931 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2932 PCIE_LINK_STATE_CLKPM);
2933
2934 if (pci_enable_device(pdev)) {
2935 err = -ENODEV;
2936 goto out_ieee80211_free_hw;
2937 }
2938
2939 pci_set_master(pdev);
2940
2941 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
2942 if (!err)
2943 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
2944 if (err) {
2945 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2946 if (!err)
2947 err = pci_set_consistent_dma_mask(pdev,
2948 DMA_BIT_MASK(32));
2949 /* both attempts failed: */
2950 if (err) {
2951 IWL_WARN(priv, "No suitable DMA available.\n");
2952 goto out_pci_disable_device;
2953 }
2954 }
2955
2956 err = pci_request_regions(pdev, DRV_NAME);
2957 if (err)
2958 goto out_pci_disable_device;
2959
2960 pci_set_drvdata(pdev, priv);
2961
2963 /***********************
2964 * 3. Read REV register
2965 ***********************/
2966 priv->hw_base = pci_iomap(pdev, 0, 0);
2967 if (!priv->hw_base) {
2968 err = -ENODEV;
2969 goto out_pci_release_regions;
2970 }
2971
2972 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
2973 (unsigned long long) pci_resource_len(pdev, 0));
2974 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
2975
2976	/* These spin locks are used by apm_ops.init and EEPROM access,
2977	 * so initialize them now.
2978	 */
2979 spin_lock_init(&priv->reg_lock);
2980 spin_lock_init(&priv->lock);
2981
2982 /*
2983 * stop and reset the on-board processor just in case it is in a
2984 * strange state ... like being left stranded by a primary kernel
2985 * and this is now the kdump kernel trying to start up
2986 */
2987 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2988
2989 iwl4965_hw_detect(priv);
2990 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
2991 priv->cfg->name, priv->hw_rev);
2992
2993 /* We disable the RETRY_TIMEOUT register (0x41) to keep
2994 * PCI Tx retries from interfering with C3 CPU state */
2995 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2996
2997 iwl4965_prepare_card_hw(priv);
2998 if (!priv->hw_ready) {
2999		IWL_WARN(priv, "Failed, HW not ready\n");
		err = -EIO;
3000		goto out_iounmap;
3001 }
3002
3003 /*****************
3004 * 4. Read EEPROM
3005 *****************/
3006 /* Read the EEPROM */
3007 err = iwl_legacy_eeprom_init(priv);
3008 if (err) {
3009 IWL_ERR(priv, "Unable to init EEPROM\n");
3010 goto out_iounmap;
3011 }
3012 err = iwl4965_eeprom_check_version(priv);
3013 if (err)
3014 goto out_free_eeprom;
3018
3019 /* extract MAC Address */
3020 iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
3021 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3022 priv->hw->wiphy->addresses = priv->addresses;
3023 priv->hw->wiphy->n_addresses = 1;
3024
3025 /************************
3026 * 5. Setup HW constants
3027 ************************/
3028 if (iwl4965_set_hw_params(priv)) {
3029 IWL_ERR(priv, "failed to set hw parameters\n");
3030 goto out_free_eeprom;
3031 }
3032
3033 /*******************
3034 * 6. Setup priv
3035 *******************/
3036
3037 err = iwl4965_init_drv(priv);
3038 if (err)
3039 goto out_free_eeprom;
3040 /* At this point both hw and priv are initialized. */
3041
3042 /********************
3043 * 7. Setup services
3044 ********************/
3045 spin_lock_irqsave(&priv->lock, flags);
3046 iwl_legacy_disable_interrupts(priv);
3047 spin_unlock_irqrestore(&priv->lock, flags);
3048
3049 pci_enable_msi(priv->pci_dev);
3050
3051 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3052 IRQF_SHARED, DRV_NAME, priv);
3053 if (err) {
3054 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3055 goto out_disable_msi;
3056 }
3057
3058 iwl4965_setup_deferred_work(priv);
3059 iwl4965_setup_rx_handlers(priv);
3060
3061 /*********************************************
3062 * 8. Enable interrupts and read RFKILL state
3063 *********************************************/
3064
3065 /* enable rfkill interrupt: hw bug w/a */
3066 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3067 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3068 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3069 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3070 }
3071
3072 iwl_legacy_enable_rfkill_int(priv);
3073
3074 /* If platform's RF_KILL switch is NOT set to KILL */
3075 if (iwl_read32(priv, CSR_GP_CNTRL) &
3076 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3077 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3078 else
3079 set_bit(STATUS_RF_KILL_HW, &priv->status);
3080
3081 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3082 test_bit(STATUS_RF_KILL_HW, &priv->status));
3083
3084 iwl_legacy_power_initialize(priv);
3085
3086 init_completion(&priv->_4965.firmware_loading_complete);
3087
3088 err = iwl4965_request_firmware(priv, true);
3089 if (err)
3090 goto out_destroy_workqueue;
3091
3092 return 0;
3093
3094 out_destroy_workqueue:
3095 destroy_workqueue(priv->workqueue);
3096 priv->workqueue = NULL;
3097 free_irq(priv->pci_dev->irq, priv);
3098 out_disable_msi:
3099 pci_disable_msi(priv->pci_dev);
3100 iwl4965_uninit_drv(priv);
3101 out_free_eeprom:
3102 iwl_legacy_eeprom_free(priv);
3103 out_iounmap:
3104 pci_iounmap(pdev, priv->hw_base);
3105 out_pci_release_regions:
3106 pci_set_drvdata(pdev, NULL);
3107 pci_release_regions(pdev);
3108 out_pci_disable_device:
3109 pci_disable_device(pdev);
3110 out_ieee80211_free_hw:
3111 iwl_legacy_free_traffic_mem(priv);
3112 ieee80211_free_hw(priv->hw);
3113 out:
3114 return err;
3115}
3116
3117static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
3118{
3119 struct iwl_priv *priv = pci_get_drvdata(pdev);
3120 unsigned long flags;
3121
3122 if (!priv)
3123 return;
3124
3125 wait_for_completion(&priv->_4965.firmware_loading_complete);
3126
3127 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3128
3129 iwl_legacy_dbgfs_unregister(priv);
3130 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
3131
3132	/* The ieee80211_unregister_hw() call will cause iwl_mac_stop()
3133	 * and iwl4965_down() to be called; since we are removing the
3134	 * device we need to set the STATUS_EXIT_PENDING bit first.
3135	 */
3136 set_bit(STATUS_EXIT_PENDING, &priv->status);
3137
3138 iwl_legacy_leds_exit(priv);
3139
3140 if (priv->mac80211_registered) {
3141 ieee80211_unregister_hw(priv->hw);
3142 priv->mac80211_registered = 0;
3143 } else {
3144 iwl4965_down(priv);
3145 }
3146
3147 /*
3148 * Make sure device is reset to low power before unloading driver.
3149 * This may be redundant with iwl4965_down(), but there are paths to
3150 * run iwl4965_down() without calling apm_ops.stop(), and there are
3151 * paths to avoid running iwl4965_down() at all before leaving driver.
3152 * This (inexpensive) call *makes sure* device is reset.
3153 */
3154 iwl_legacy_apm_stop(priv);
3155
3156 /* make sure we flush any pending irq or
3157 * tasklet for the driver
3158 */
3159 spin_lock_irqsave(&priv->lock, flags);
3160 iwl_legacy_disable_interrupts(priv);
3161 spin_unlock_irqrestore(&priv->lock, flags);
3162
3163 iwl4965_synchronize_irq(priv);
3164
3165 iwl4965_dealloc_ucode_pci(priv);
3166
3167 if (priv->rxq.bd)
3168 iwl4965_rx_queue_free(priv, &priv->rxq);
3169 iwl4965_hw_txq_ctx_free(priv);
3170
3171 iwl_legacy_eeprom_free(priv);
3172
3174 /*netif_stop_queue(dev); */
3175 flush_workqueue(priv->workqueue);
3176
3177 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
3178 * priv->workqueue... so we can't take down the workqueue
3179 * until now... */
3180 destroy_workqueue(priv->workqueue);
3181 priv->workqueue = NULL;
3182 iwl_legacy_free_traffic_mem(priv);
3183
3184 free_irq(priv->pci_dev->irq, priv);
3185 pci_disable_msi(priv->pci_dev);
3186 pci_iounmap(pdev, priv->hw_base);
3187 pci_release_regions(pdev);
3188 pci_disable_device(pdev);
3189 pci_set_drvdata(pdev, NULL);
3190
3191 iwl4965_uninit_drv(priv);
3192
3193 dev_kfree_skb(priv->beacon_skb);
3194
3195 ieee80211_free_hw(priv->hw);
3196}
3197
3198/*
3199 * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
3200 * Must be called with priv->lock held and with MAC access held.
3201 */
3202void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
3203{
3204 iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
3205}
3206
3207/*****************************************************************************
3208 *
3209 * driver and module entry point
3210 *
3211 *****************************************************************************/
3212
3213/* The hardware-specific file defines the PCI ID table for this hardware module */
3214static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
3215#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
3216 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
3217 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
3218#endif /* CONFIG_IWL4965 */
3219
3220 {0}
3221};
3222MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
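
/*
 * For reference (hedged: IWL_PCI_DEVICE() is defined elsewhere in
 * iwlegacy, so this is an approximation): an entry such as
 * {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)} expands roughly to
 *
 *	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x4229,
 *	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 *	  .driver_data = (kernel_ulong_t)&iwl4965_cfg },
 *
 * and that driver_data pointer is what iwl4965_pci_probe() reads back
 * via ent->driver_data.
 */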
3223
3224static struct pci_driver iwl4965_driver = {
3225 .name = DRV_NAME,
3226 .id_table = iwl4965_hw_card_ids,
3227 .probe = iwl4965_pci_probe,
3228 .remove = __devexit_p(iwl4965_pci_remove),
3229 .driver.pm = IWL_LEGACY_PM_OPS,
3230};
3231
3232static int __init iwl4965_init(void)
3233{
3235 int ret;
3236 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3237 pr_info(DRV_COPYRIGHT "\n");
3238
3239 ret = iwl4965_rate_control_register();
3240 if (ret) {
3241 pr_err("Unable to register rate control algorithm: %d\n", ret);
3242 return ret;
3243 }
3244
3245 ret = pci_register_driver(&iwl4965_driver);
3246 if (ret) {
3247 pr_err("Unable to initialize PCI module\n");
3248 goto error_register;
3249 }
3250
3251 return ret;
3252
3253error_register:
3254 iwl4965_rate_control_unregister();
3255 return ret;
3256}
3257
3258static void __exit iwl4965_exit(void)
3259{
3260 pci_unregister_driver(&iwl4965_driver);
3261 iwl4965_rate_control_unregister();
3262}
3263
3264module_exit(iwl4965_exit);
3265module_init(iwl4965_init);
3266
3267#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3268module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
3269MODULE_PARM_DESC(debug, "debug output mask");
3270#endif
3271
3272module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
3273MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
3274module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
3275MODULE_PARM_DESC(queues_num, "number of hw queues.");
3276module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
3277MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
3278module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
3279 int, S_IRUGO);
3280MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3281module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
3282MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
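
/*
 * Usage sketch (illustrative; assumes the module is built under its
 * DRV_NAME, "iwl4965"): the parameters above can be set at load time,
 * e.g.
 *
 *	modprobe iwl4965 swcrypto=1 fw_restart=1 amsdu_size_8K=1
 */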